Merge V8 5.8.283.32

Test: Build V8 for arm, arm64, x86, x86_64, mips, mips64 and
set a PAC script from the UI on bullhead

Change-Id: I7cc773b5daca34d869e768a1deebae3876f2dfac
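
Most of the access-builder.cc churn below is mechanical: FieldAccess
(declared in src/compiler/simplified-operator.h) gained a map member
between its name and type members, so every brace initializer is
reflowed to pass an extra MaybeHandle<Map>(). A sketch of the assumed
layout after this merge, with member names inferred from the
initializer order in the diff:

    struct FieldAccess {
      BaseTaggedness base_is_tagged;        // tagged or untagged base pointer
      int offset;                           // offset of the field, without tag
      MaybeHandle<Name> name;               // debugging only
      MaybeHandle<Map> map;                 // map of the field value, if known (the new member)
      Type* type;                           // type of the field
      MachineType machine_type;             // machine representation of the field
      WriteBarrierKind write_barrier_kind;  // write barrier hint for stores
    };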
diff --git a/src/compiler/OWNERS b/src/compiler/OWNERS
index 02de4ed..10ffcb0 100644
--- a/src/compiler/OWNERS
+++ b/src/compiler/OWNERS
@@ -6,3 +6,4 @@
 mstarzinger@chromium.org
 mtrofin@chromium.org
 titzer@chromium.org
+danno@chromium.org
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
index 540eb37..2722590 100644
--- a/src/compiler/access-builder.cc
+++ b/src/compiler/access-builder.cc
@@ -9,6 +9,7 @@
 #include "src/frames.h"
 #include "src/handles-inl.h"
 #include "src/heap/heap.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,47 +17,67 @@
 
 // static
 FieldAccess AccessBuilder::ForExternalDoubleValue() {
-  FieldAccess access = {kUntaggedBase,          0,
-                        MaybeHandle<Name>(),    Type::Number(),
-                        MachineType::Float64(), kNoWriteBarrier};
+  FieldAccess access = {kUntaggedBase,       0,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Number(),      MachineType::Float64(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalTaggedValue() {
+  FieldAccess access = {kUntaggedBase,       0,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Any(),         MachineType::AnyTagged(),
+                        kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForExternalUint8Value() {
+  FieldAccess access = {kUntaggedBase,           0,
+                        MaybeHandle<Name>(),     MaybeHandle<Map>(),
+                        TypeCache::Get().kUint8, MachineType::Uint8(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForMap() {
-  FieldAccess access = {
-      kTaggedBase,           HeapObject::kMapOffset,       MaybeHandle<Name>(),
-      Type::OtherInternal(), MachineType::TaggedPointer(), kMapWriteBarrier};
+  FieldAccess access = {kTaggedBase,           HeapObject::kMapOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
+                        kMapWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForHeapNumberValue() {
-  FieldAccess access = {kTaggedBase,
-                        HeapNumber::kValueOffset,
-                        MaybeHandle<Name>(),
-                        TypeCache::Get().kFloat64,
-                        MachineType::Float64(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        HeapNumber::kValueOffset,  MaybeHandle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kFloat64, MachineType::Float64(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectProperties() {
-  FieldAccess access = {
-      kTaggedBase,      JSObject::kPropertiesOffset,  MaybeHandle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSObject::kPropertiesOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSObjectElements() {
-  FieldAccess access = {
-      kTaggedBase,      JSObject::kElementsOffset,    MaybeHandle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSObject::kElementsOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -65,126 +86,136 @@
 FieldAccess AccessBuilder::ForJSObjectInObjectProperty(Handle<Map> map,
                                                        int index) {
   int const offset = map->GetInObjectPropertyOffset(index);
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        MaybeHandle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,         offset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForJSObjectOffset(
+    int offset, WriteBarrierKind write_barrier_kind) {
+  FieldAccess access = {kTaggedBase,         offset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        write_barrier_kind};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForJSCollectionTable() {
+  FieldAccess access = {kTaggedBase,           JSCollection::kTableOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionPrototypeOrInitialMap() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kPrototypeOrInitialMapOffset,
-                        MaybeHandle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSFunction::kPrototypeOrInitialMapOffset,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Any(),         MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionContext() {
-  FieldAccess access = {
-      kTaggedBase,      JSFunction::kContextOffset, MaybeHandle<Name>(),
-      Type::Internal(), MachineType::AnyTagged(),   kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSFunction::kContextOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::AnyTagged(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionSharedFunctionInfo() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kSharedFunctionInfoOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSFunction::kSharedFunctionInfoOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
-FieldAccess AccessBuilder::ForJSFunctionLiterals() {
-  FieldAccess access = {
-      kTaggedBase,      JSFunction::kLiteralsOffset,  Handle<Name>(),
-      Type::Internal(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSFunctionFeedbackVector() {
+  FieldAccess access = {kTaggedBase,         JSFunction::kFeedbackVectorOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionCodeEntry() {
-  FieldAccess access = {
-      kTaggedBase,           JSFunction::kCodeEntryOffset, Handle<Name>(),
-      Type::OtherInternal(), MachineType::Pointer(),       kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,           JSFunction::kCodeEntryOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::Pointer(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSFunctionNextFunctionLink() {
-  FieldAccess access = {kTaggedBase,
-                        JSFunction::kNextFunctionLinkOffset,
-                        Handle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSFunction::kNextFunctionLinkOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Any(),         MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectContext() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kContextOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,         JSGeneratorObject::kContextOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Internal(),    MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectContinuation() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kContinuationOffset,
-                        Handle<Name>(),
-                        Type::SignedSmall(),
-                        MachineType::TaggedSigned(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kContinuationOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::SignedSmall(), MachineType::TaggedSigned(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectInputOrDebugPos() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kInputOrDebugPosOffset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kInputOrDebugPosOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::NonInternal(), MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
-FieldAccess AccessBuilder::ForJSGeneratorObjectOperandStack() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kOperandStackOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+FieldAccess AccessBuilder::ForJSGeneratorObjectRegisterFile() {
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kRegisterFileOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Internal(),    MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGeneratorObjectResumeMode() {
-  FieldAccess access = {kTaggedBase,
-                        JSGeneratorObject::kResumeModeOffset,
-                        Handle<Name>(),
-                        Type::SignedSmall(),
-                        MachineType::TaggedSigned(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGeneratorObject::kResumeModeOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::SignedSmall(), MachineType::TaggedSigned(),
+      kNoWriteBarrier};
   return access;
 }
 
@@ -194,6 +225,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArray::kLengthOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         type_cache.kJSArrayLengthType,
                         MachineType::TaggedSigned(),
                         kFullWriteBarrier};
@@ -210,30 +242,28 @@
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayBuffer::kBackingStoreOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::Pointer(),
-                        kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSArrayBuffer::kBackingStoreOffset,
+      MaybeHandle<Name>(),   MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::Pointer(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBitField() {
-  FieldAccess access = {kTaggedBase,           JSArrayBuffer::kBitFieldOffset,
-                        MaybeHandle<Name>(),   TypeCache::Get().kUint8,
-                        MachineType::Uint32(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        JSArrayBuffer::kBitFieldOffset, MaybeHandle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8,        MachineType::Uint32(),
+      kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferViewBuffer() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayBufferView::kBufferOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           JSArrayBufferView::kBufferOffset,
+                        MaybeHandle<Name>(),   MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -243,6 +273,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBufferView::kByteLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -254,6 +285,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayBufferView::kByteOffsetOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -265,6 +297,7 @@
   FieldAccess access = {kTaggedBase,
                         JSTypedArray::kLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kJSTypedArrayLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -276,6 +309,7 @@
   FieldAccess access = {kTaggedBase,
                         JSDate::kValueOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kJSDateValueType,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -284,48 +318,51 @@
 
 // static
 FieldAccess AccessBuilder::ForJSDateField(JSDate::FieldIndex index) {
-  FieldAccess access = {kTaggedBase,
-                        JSDate::kValueOffset + index * kPointerSize,
-                        MaybeHandle<Name>(),
-                        Type::Number(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSDate::kValueOffset + index * kPointerSize,
+      MaybeHandle<Name>(), MaybeHandle<Map>(),
+      Type::Number(),      MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultDone() {
-  FieldAccess access = {
-      kTaggedBase,         JSIteratorResult::kDoneOffset, MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),      kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSIteratorResult::kDoneOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSIteratorResultValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSIteratorResult::kValueOffset, MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),       kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSIteratorResult::kValueOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpFlags() {
-  FieldAccess access = {
-      kTaggedBase,         JSRegExp::kFlagsOffset,   MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSRegExp::kFlagsOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForJSRegExpSource() {
-  FieldAccess access = {
-      kTaggedBase,         JSRegExp::kSourceOffset,  MaybeHandle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSRegExp::kSourceOffset,
+                        MaybeHandle<Name>(), MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -335,6 +372,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedArray::kLengthOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kFixedArrayLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -343,12 +381,11 @@
 
 // static
 FieldAccess AccessBuilder::ForFixedTypedArrayBaseBasePointer() {
-  FieldAccess access = {kTaggedBase,
-                        FixedTypedArrayBase::kBasePointerOffset,
-                        MaybeHandle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           FixedTypedArrayBase::kBasePointerOffset,
+      MaybeHandle<Name>(),   MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::AnyTagged(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -357,6 +394,7 @@
   FieldAccess access = {kTaggedBase,
                         FixedTypedArrayBase::kExternalPointerOffset,
                         MaybeHandle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
@@ -365,53 +403,51 @@
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCache() {
-  FieldAccess access = {kTaggedBase,
-                        DescriptorArray::kEnumCacheOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           DescriptorArray::kEnumCacheOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForDescriptorArrayEnumCacheBridgeCache() {
-  FieldAccess access = {kTaggedBase,
-                        DescriptorArray::kEnumCacheBridgeCacheOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           DescriptorArray::kEnumCacheBridgeCacheOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField() {
-  FieldAccess access = {kTaggedBase,          Map::kBitFieldOffset,
-                        Handle<Name>(),       TypeCache::Get().kUint8,
-                        MachineType::Uint8(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kBitFieldOffset,    Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8, MachineType::Uint8(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapBitField3() {
-  FieldAccess access = {kTaggedBase,          Map::kBitField3Offset,
-                        Handle<Name>(),       TypeCache::Get().kInt32,
-                        MachineType::Int32(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kBitField3Offset,   Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kInt32, MachineType::Int32(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapDescriptors() {
-  FieldAccess access = {kTaggedBase,
-                        Map::kDescriptorsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Map::kDescriptorsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
@@ -419,48 +455,47 @@
 
 // static
 FieldAccess AccessBuilder::ForMapInstanceType() {
-  FieldAccess access = {kTaggedBase,          Map::kInstanceTypeOffset,
-                        Handle<Name>(),       TypeCache::Get().kUint8,
-                        MachineType::Uint8(), kNoWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,        Map::kInstanceTypeOffset, Handle<Name>(),
+      MaybeHandle<Map>(), TypeCache::Get().kUint8,  MachineType::Uint8(),
+      kNoWriteBarrier};
   return access;
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapPrototype() {
-  FieldAccess access = {
-      kTaggedBase, Map::kPrototypeOffset,        Handle<Name>(),
-      Type::Any(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         Map::kPrototypeOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Any(),         MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForModuleRegularExports() {
-  FieldAccess access = {kTaggedBase,
-                        Module::kRegularExportsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Module::kRegularExportsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForModuleRegularImports() {
-  FieldAccess access = {kTaggedBase,
-                        Module::kRegularImportsOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,           Module::kRegularImportsOffset,
+                        Handle<Name>(),        MaybeHandle<Map>(),
+                        Type::OtherInternal(), MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForNameHashField() {
-  FieldAccess access = {kTaggedBase,           Name::kHashFieldOffset,
-                        Handle<Name>(),        Type::Internal(),
-                        MachineType::Uint32(), kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,        Name::kHashFieldOffset,
+                        Handle<Name>(),     MaybeHandle<Map>(),
+                        Type::Unsigned32(), MachineType::Uint32(),
+                        kNoWriteBarrier};
   return access;
 }
 
@@ -469,6 +504,7 @@
   FieldAccess access = {kTaggedBase,
                         String::kLengthOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kStringLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -477,33 +513,46 @@
 
 // static
 FieldAccess AccessBuilder::ForConsStringFirst() {
-  FieldAccess access = {
-      kTaggedBase,    ConsString::kFirstOffset,     Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         ConsString::kFirstOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForConsStringSecond() {
-  FieldAccess access = {
-      kTaggedBase,    ConsString::kSecondOffset,    Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         ConsString::kSecondOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForThinStringActual() {
+  FieldAccess access = {kTaggedBase,         ThinString::kActualOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForSlicedStringOffset() {
-  FieldAccess access = {
-      kTaggedBase,         SlicedString::kOffsetOffset, Handle<Name>(),
-      Type::SignedSmall(), MachineType::TaggedSigned(), kNoWriteBarrier};
+  FieldAccess access = {kTaggedBase,         SlicedString::kOffsetOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::SignedSmall(), MachineType::TaggedSigned(),
+                        kNoWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForSlicedStringParent() {
-  FieldAccess access = {
-      kTaggedBase,    SlicedString::kParentOffset,  Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(), kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         SlicedString::kParentOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -512,6 +561,7 @@
   FieldAccess access = {kTaggedBase,
                         ExternalString::kResourceDataOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ExternalPointer(),
                         MachineType::Pointer(),
                         kNoWriteBarrier};
@@ -550,23 +600,20 @@
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectGlobalProxy() {
-  FieldAccess access = {kTaggedBase,
-                        JSGlobalObject::kGlobalProxyOffset,
-                        Handle<Name>(),
-                        Type::Receiver(),
-                        MachineType::TaggedPointer(),
+  FieldAccess access = {kTaggedBase,         JSGlobalObject::kGlobalProxyOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::Receiver(),    MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSGlobalObjectNativeContext() {
-  FieldAccess access = {kTaggedBase,
-                        JSGlobalObject::kNativeContextOffset,
-                        Handle<Name>(),
-                        Type::Internal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,         JSGlobalObject::kNativeContextOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::Internal(),    MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
@@ -575,6 +622,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayIterator::kIteratedObjectOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         Type::ReceiverOrUndefined(),
                         MachineType::TaggedPointer(),
                         kPointerWriteBarrier};
@@ -589,6 +637,7 @@
   FieldAccess access = {kTaggedBase,
                         JSArrayIterator::kNextIndexOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kPositiveSafeInteger,
                         MachineType::AnyTagged(),
                         kFullWriteBarrier};
@@ -614,20 +663,20 @@
 
 // static
 FieldAccess AccessBuilder::ForJSArrayIteratorObjectMap() {
-  FieldAccess access = {kTaggedBase,
-                        JSArrayIterator::kIteratedObjectMapOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::TaggedPointer(),
-                        kPointerWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           JSArrayIterator::kIteratedObjectMapOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::TaggedPointer(),
+      kPointerWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForJSStringIteratorString() {
-  FieldAccess access = {
-      kTaggedBase,    JSStringIterator::kStringOffset, Handle<Name>(),
-      Type::String(), MachineType::TaggedPointer(),    kPointerWriteBarrier};
+  FieldAccess access = {kTaggedBase,         JSStringIterator::kStringOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::String(),      MachineType::TaggedPointer(),
+                        kPointerWriteBarrier};
   return access;
 }
 
@@ -636,6 +685,7 @@
   FieldAccess access = {kTaggedBase,
                         JSStringIterator::kNextIndexOffset,
                         Handle<Name>(),
+                        MaybeHandle<Map>(),
                         TypeCache::Get().kStringLengthType,
                         MachineType::TaggedSigned(),
                         kNoWriteBarrier};
@@ -644,52 +694,53 @@
 
 // static
 FieldAccess AccessBuilder::ForValue() {
-  FieldAccess access = {
-      kTaggedBase,         JSValue::kValueOffset,    Handle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(), kFullWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForArgumentsLength() {
-  FieldAccess access = {
-      kTaggedBase,         JSArgumentsObject::kLengthOffset, Handle<Name>(),
-      Type::NonInternal(), MachineType::AnyTagged(),         kFullWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForArgumentsCallee() {
-  FieldAccess access = {kTaggedBase,
-                        JSSloppyArgumentsObject::kCalleeOffset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
-                        kPointerWriteBarrier};
-  return access;
-}
-
-
-// static
-FieldAccess AccessBuilder::ForFixedArraySlot(size_t index) {
-  int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        Handle<Name>(),
-                        Type::NonInternal(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,         JSValue::kValueOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
 
 // static
-FieldAccess AccessBuilder::ForCellValue() {
+FieldAccess AccessBuilder::ForArgumentsLength() {
+  FieldAccess access = {kTaggedBase,         JSArgumentsObject::kLengthOffset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        kFullWriteBarrier};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForArgumentsCallee() {
   FieldAccess access = {
-      kTaggedBase, Cell::kValueOffset,       Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+      kTaggedBase,         JSSloppyArgumentsObject::kCalleeOffset,
+      Handle<Name>(),      MaybeHandle<Map>(),
+      Type::NonInternal(), MachineType::AnyTagged(),
+      kPointerWriteBarrier};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForFixedArraySlot(
+    size_t index, WriteBarrierKind write_barrier_kind) {
+  int offset = FixedArray::OffsetOfElementAt(static_cast<int>(index));
+  FieldAccess access = {kTaggedBase,         offset,
+                        Handle<Name>(),      MaybeHandle<Map>(),
+                        Type::NonInternal(), MachineType::AnyTagged(),
+                        write_barrier_kind};
+  return access;
+}
+
+
+// static
+FieldAccess AccessBuilder::ForCellValue() {
+  FieldAccess access = {kTaggedBase,      Cell::kValueOffset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -698,31 +749,29 @@
   int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
   DCHECK_EQ(offset,
             Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
-  FieldAccess access = {kTaggedBase,
-                        offset,
-                        Handle<Name>(),
-                        Type::Any(),
-                        MachineType::AnyTagged(),
+  FieldAccess access = {kTaggedBase,      offset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
                         kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForContextExtensionScopeInfo() {
-  FieldAccess access = {kTaggedBase,
-                        ContextExtension::kScopeInfoOffset,
-                        Handle<Name>(),
-                        Type::OtherInternal(),
-                        MachineType::AnyTagged(),
-                        kFullWriteBarrier};
+  FieldAccess access = {
+      kTaggedBase,           ContextExtension::kScopeInfoOffset,
+      Handle<Name>(),        MaybeHandle<Map>(),
+      Type::OtherInternal(), MachineType::AnyTagged(),
+      kFullWriteBarrier};
   return access;
 }
 
 // static
 FieldAccess AccessBuilder::ForContextExtensionExtension() {
-  FieldAccess access = {
-      kTaggedBase, ContextExtension::kExtensionOffset, Handle<Name>(),
-      Type::Any(), MachineType::AnyTagged(),           kFullWriteBarrier};
+  FieldAccess access = {kTaggedBase,      ContextExtension::kExtensionOffset,
+                        Handle<Name>(),   MaybeHandle<Map>(),
+                        Type::Any(),      MachineType::AnyTagged(),
+                        kFullWriteBarrier};
   return access;
 }
 
@@ -831,6 +880,68 @@
   return access;
 }
 
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfElements() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(HashTableBase::kNumberOfElementsIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseNumberOfDeletedElement() {
+  FieldAccess access = {
+      kTaggedBase, FixedArray::OffsetOfElementAt(
+                       HashTableBase::kNumberOfDeletedElementsIndex),
+      MaybeHandle<Name>(), MaybeHandle<Map>(), Type::SignedSmall(),
+      MachineType::TaggedSigned(), kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForHashTableBaseCapacity() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(HashTableBase::kCapacityIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryMaxNumberKey() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(NameDictionary::kMaxNumberKeyIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::Any(),
+      MachineType::AnyTagged(),
+      kNoWriteBarrier};
+  return access;
+}
+
+// static
+FieldAccess AccessBuilder::ForDictionaryNextEnumerationIndex() {
+  FieldAccess access = {
+      kTaggedBase,
+      FixedArray::OffsetOfElementAt(NameDictionary::kNextEnumerationIndexIndex),
+      MaybeHandle<Name>(),
+      MaybeHandle<Map>(),
+      Type::SignedSmall(),
+      MachineType::TaggedSigned(),
+      kNoWriteBarrier};
+  return access;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
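
The accessors above feed SimplifiedOperatorBuilder::LoadField/StoreField
nodes. A hypothetical use site for the new JSCollection accessor (the
graph()/simplified()/receiver names are assumptions about the caller):

    // Load the backing hash table of a JSMap/JSSet (sketch).
    Node* table = graph()->NewNode(
        simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
        receiver, effect, control);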
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
index eb8e78f..9d23220 100644
--- a/src/compiler/access-builder.h
+++ b/src/compiler/access-builder.h
@@ -26,6 +26,12 @@
   // Provides access to a double field identified by an external reference.
   static FieldAccess ForExternalDoubleValue();
 
+  // Provides access to a tagged field identified by an external reference.
+  static FieldAccess ForExternalTaggedValue();
+
+  // Provides access to a uint8 field identified by an external reference.
+  static FieldAccess ForExternalUint8Value();
+
   // ===========================================================================
   // Access to heap object fields and elements (based on tagged pointer).
 
@@ -43,6 +49,11 @@
 
   // Provides access to JSObject inobject property fields.
   static FieldAccess ForJSObjectInObjectProperty(Handle<Map> map, int index);
+  static FieldAccess ForJSObjectOffset(
+      int offset, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
+
+  // Provides access to JSCollection::table() field.
+  static FieldAccess ForJSCollectionTable();
 
   // Provides access to JSFunction::prototype_or_initial_map() field.
   static FieldAccess ForJSFunctionPrototypeOrInitialMap();
@@ -53,8 +64,8 @@
   // Provides access to JSFunction::shared() field.
   static FieldAccess ForJSFunctionSharedFunctionInfo();
 
-  // Provides access to JSFunction::literals() field.
-  static FieldAccess ForJSFunctionLiterals();
+  // Provides access to JSFunction::feedback_vector() field.
+  static FieldAccess ForJSFunctionFeedbackVector();
 
   // Provides access to JSFunction::code() field.
   static FieldAccess ForJSFunctionCodeEntry();
@@ -71,8 +82,8 @@
   // Provides access to JSGeneratorObject::input_or_debug_pos() field.
   static FieldAccess ForJSGeneratorObjectInputOrDebugPos();
 
-  // Provides access to JSGeneratorObject::operand_stack() field.
-  static FieldAccess ForJSGeneratorObjectOperandStack();
+  // Provides access to JSGeneratorObject::register_file() field.
+  static FieldAccess ForJSGeneratorObjectRegisterFile();
 
   // Provides access to JSGeneratorObject::resume_mode() field.
   static FieldAccess ForJSGeneratorObjectResumeMode();
@@ -164,6 +175,9 @@
   // Provides access to ConsString::second() field.
   static FieldAccess ForConsStringSecond();
 
+  // Provides access to ThinString::actual() field.
+  static FieldAccess ForThinStringActual();
+
   // Provides access to SlicedString::offset() field.
   static FieldAccess ForSlicedStringOffset();
 
@@ -218,7 +232,8 @@
   static FieldAccess ForArgumentsCallee();
 
   // Provides access to FixedArray slots.
-  static FieldAccess ForFixedArraySlot(size_t index);
+  static FieldAccess ForFixedArraySlot(
+      size_t index, WriteBarrierKind write_barrier_kind = kFullWriteBarrier);
 
   // Provides access to Context slots.
   static FieldAccess ForContextSlot(size_t index);
@@ -238,6 +253,15 @@
   static ElementAccess ForTypedArrayElement(ExternalArrayType type,
                                             bool is_external);
 
+  // Provides access to HashTable fields.
+  static FieldAccess ForHashTableBaseNumberOfElements();
+  static FieldAccess ForHashTableBaseNumberOfDeletedElement();
+  static FieldAccess ForHashTableBaseCapacity();
+
+  // Provides access to Dictionary fields.
+  static FieldAccess ForDictionaryMaxNumberKey();
+  static FieldAccess ForDictionaryNextEnumerationIndex();
+
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
 };
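
The new WriteBarrierKind parameters on ForJSObjectOffset and
ForFixedArraySlot let call sites that only ever store Smis or other
barrier-exempt values skip the barrier, e.g. (hypothetical caller):

    // Slot 0 will only ever hold a Smi, so no write barrier is needed.
    FieldAccess access = AccessBuilder::ForFixedArraySlot(0, kNoWriteBarrier);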
diff --git a/src/compiler/access-info.cc b/src/compiler/access-info.cc
index 866b060..8fef2f0 100644
--- a/src/compiler/access-info.cc
+++ b/src/compiler/access-info.cc
@@ -52,6 +52,8 @@
       return os << "Load";
     case AccessMode::kStore:
       return os << "Store";
+    case AccessMode::kStoreInLiteral:
+      return os << "StoreInLiteral";
   }
   UNREACHABLE();
   return os;
@@ -78,11 +80,12 @@
 
 // static
 PropertyAccessInfo PropertyAccessInfo::DataField(
-    MapList const& receiver_maps, FieldIndex field_index,
-    MachineRepresentation field_representation, Type* field_type,
-    MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
+    PropertyConstness constness, MapList const& receiver_maps,
+    FieldIndex field_index, MachineRepresentation field_representation,
+    Type* field_type, MaybeHandle<Map> field_map, MaybeHandle<JSObject> holder,
     MaybeHandle<Map> transition_map) {
-  return PropertyAccessInfo(holder, transition_map, field_index,
+  Kind kind = constness == kConst ? kDataConstantField : kDataField;
+  return PropertyAccessInfo(kind, holder, transition_map, field_index,
                             field_representation, field_type, field_map,
                             receiver_maps);
 }
@@ -124,10 +127,10 @@
       field_type_(Type::Any()) {}
 
 PropertyAccessInfo::PropertyAccessInfo(
-    MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
+    Kind kind, MaybeHandle<JSObject> holder, MaybeHandle<Map> transition_map,
     FieldIndex field_index, MachineRepresentation field_representation,
     Type* field_type, MaybeHandle<Map> field_map, MapList const& receiver_maps)
-    : kind_(kDataField),
+    : kind_(kind),
       receiver_maps_(receiver_maps),
       transition_map_(transition_map),
       holder_(holder),
@@ -144,13 +147,13 @@
     case kInvalid:
       break;
 
-    case kNotFound:
-      return true;
-
-    case kDataField: {
+    case kDataField:
+    case kDataConstantField: {
       // Check if we actually access the same field.
-      if (this->transition_map_.address() == that->transition_map_.address() &&
+      if (this->kind_ == that->kind_ &&
+          this->transition_map_.address() == that->transition_map_.address() &&
           this->field_index_ == that->field_index_ &&
+          this->field_map_.address() == that->field_map_.address() &&
           this->field_type_->Is(that->field_type_) &&
           that->field_type_->Is(this->field_type_) &&
           this->field_representation_ == that->field_representation_) {
@@ -173,6 +176,8 @@
       }
       return false;
     }
+
+    case kNotFound:
     case kGeneric: {
       this->receiver_maps_.insert(this->receiver_maps_.end(),
                                   that->receiver_maps_.begin(),
@@ -282,7 +287,8 @@
     int const number = descriptors->SearchWithCache(isolate(), *name, *map);
     if (number != DescriptorArray::kNotFound) {
       PropertyDetails const details = descriptors->GetDetails(number);
-      if (access_mode == AccessMode::kStore) {
+      if (access_mode == AccessMode::kStore ||
+          access_mode == AccessMode::kStoreInLiteral) {
         // Don't bother optimizing stores to read-only properties.
         if (details.IsReadOnly()) {
           return false;
@@ -295,14 +301,8 @@
           return LookupTransition(receiver_map, name, holder, access_info);
         }
       }
-      switch (details.type()) {
-        case DATA_CONSTANT: {
-          *access_info = PropertyAccessInfo::DataConstant(
-              MapList{receiver_map},
-              handle(descriptors->GetValue(number), isolate()), holder);
-          return true;
-        }
-        case DATA: {
+      if (details.location() == kField) {
+        if (details.kind() == kData) {
           int index = descriptors->GetFieldIndex(number);
           Representation details_representation = details.representation();
           FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -341,11 +341,25 @@
             }
           }
           *access_info = PropertyAccessInfo::DataField(
-              MapList{receiver_map}, field_index, field_representation,
-              field_type, field_map, holder);
+              details.constness(), MapList{receiver_map}, field_index,
+              field_representation, field_type, field_map, holder);
           return true;
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
+          // TODO(turbofan): Add support for general accessors?
+          return false;
         }
-        case ACCESSOR_CONSTANT: {
+
+      } else {
+        DCHECK_EQ(kDescriptor, details.location());
+        if (details.kind() == kData) {
+          DCHECK(!FLAG_track_constant_fields);
+          *access_info = PropertyAccessInfo::DataConstant(
+              MapList{receiver_map},
+              handle(descriptors->GetValue(number), isolate()), holder);
+          return true;
+        } else {
+          DCHECK_EQ(kAccessor, details.kind());
           Handle<Object> accessors(descriptors->GetValue(number), isolate());
           if (!accessors->IsAccessorPair()) return false;
           Handle<Object> accessor(
@@ -361,15 +375,23 @@
             if (optimization.api_call_info()->fast_handler()->IsCode()) {
               return false;
             }
+            if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+          }
+          if (access_mode == AccessMode::kLoad) {
+            Handle<Name> cached_property_name;
+            if (FunctionTemplateInfo::TryGetCachedPropertyName(isolate(),
+                                                               accessor)
+                    .ToHandle(&cached_property_name)) {
+              if (ComputePropertyAccessInfo(map, cached_property_name,
+                                            access_mode, access_info)) {
+                return true;
+              }
+            }
           }
           *access_info = PropertyAccessInfo::AccessorConstant(
               MapList{receiver_map}, accessor, holder);
           return true;
         }
-        case ACCESSOR: {
-          // TODO(turbofan): Add support for general accessors?
-          return false;
-        }
       }
       UNREACHABLE();
       return false;
@@ -382,6 +404,11 @@
       return false;
     }
 
+    // Don't search on the prototype when storing in literals.
+    if (access_mode == AccessMode::kStoreInLiteral) {
+      return LookupTransition(receiver_map, name, holder, access_info);
+    }
+
     // Don't lookup private symbols on the prototype chain.
     if (name->IsPrivate()) return false;
 
@@ -478,8 +505,9 @@
         field_type = type_cache_.kJSArrayLengthType;
       }
     }
+    // Special fields are always mutable.
     *access_info = PropertyAccessInfo::DataField(
-        MapList{map}, field_index, field_representation, field_type);
+        kMutable, MapList{map}, field_index, field_representation, field_type);
     return true;
   }
   return false;
@@ -503,7 +531,7 @@
     // Don't bother optimizing stores to read-only properties.
     if (details.IsReadOnly()) return false;
     // TODO(bmeurer): Handle transition to data constant?
-    if (details.type() != DATA) return false;
+    if (details.location() != kField) return false;
     int const index = details.field_index();
     Representation details_representation = details.representation();
     FieldIndex field_index = FieldIndex::ForPropertyIndex(
@@ -539,9 +567,10 @@
       }
     }
     dependencies()->AssumeMapNotDeprecated(transition_map);
+    // Transitioning stores are never stores to constant fields.
     *access_info = PropertyAccessInfo::DataField(
-        MapList{map}, field_index, field_representation, field_type, field_map,
-        holder, transition_map);
+        kMutable, MapList{map}, field_index, field_representation, field_type,
+        field_map, holder, transition_map);
     return true;
   }
   return false;
diff --git a/src/compiler/access-info.h b/src/compiler/access-info.h
index 1d485dd..42fa1db 100644
--- a/src/compiler/access-info.h
+++ b/src/compiler/access-info.h
@@ -26,7 +26,8 @@
 class TypeCache;
 
 // Whether we are loading a property or storing to a property.
-enum class AccessMode { kLoad, kStore };
+// For a store during literal creation, do not walk up the prototype chain.
+enum class AccessMode { kLoad, kStore, kStoreInLiteral };
 
 std::ostream& operator<<(std::ostream&, AccessMode);
 
@@ -61,6 +62,7 @@
     kNotFound,
     kDataConstant,
     kDataField,
+    kDataConstantField,
     kAccessorConstant,
     kGeneric
   };
@@ -71,9 +73,9 @@
                                          Handle<Object> constant,
                                          MaybeHandle<JSObject> holder);
   static PropertyAccessInfo DataField(
-      MapList const& receiver_maps, FieldIndex field_index,
-      MachineRepresentation field_representation, Type* field_type,
-      MaybeHandle<Map> field_map = MaybeHandle<Map>(),
+      PropertyConstness constness, MapList const& receiver_maps,
+      FieldIndex field_index, MachineRepresentation field_representation,
+      Type* field_type, MaybeHandle<Map> field_map = MaybeHandle<Map>(),
       MaybeHandle<JSObject> holder = MaybeHandle<JSObject>(),
       MaybeHandle<Map> transition_map = MaybeHandle<Map>());
   static PropertyAccessInfo AccessorConstant(MapList const& receiver_maps,
@@ -88,6 +90,9 @@
   bool IsNotFound() const { return kind() == kNotFound; }
   bool IsDataConstant() const { return kind() == kDataConstant; }
   bool IsDataField() const { return kind() == kDataField; }
+  // TODO(ishell): rename to IsDataConstant() once constant field tracking
+  // is done.
+  bool IsDataConstantField() const { return kind() == kDataConstantField; }
   bool IsAccessorConstant() const { return kind() == kAccessorConstant; }
   bool IsGeneric() const { return kind() == kGeneric; }
 
@@ -110,7 +115,7 @@
                      MapList const& receiver_maps);
   PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      Handle<Object> constant, MapList const& receiver_maps);
-  PropertyAccessInfo(MaybeHandle<JSObject> holder,
+  PropertyAccessInfo(Kind kind, MaybeHandle<JSObject> holder,
                      MaybeHandle<Map> transition_map, FieldIndex field_index,
                      MachineRepresentation field_representation,
                      Type* field_type, MaybeHandle<Map> field_map,
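
The access-info changes replace the switch over details.type() with
nested checks on details.location() and details.kind(), and thread a
PropertyConstness through PropertyAccessInfo::DataField. A sketch of
the assumed PropertyDetails decomposition this relies on (see
src/property-details.h; the enum values are assumptions, and the
mapping is inferred from the rewritten branches):

    enum PropertyKind { kData = 0, kAccessor = 1 };
    enum PropertyLocation { kField = 0, kDescriptor = 1 };
    enum PropertyConstness { kMutable = 0, kConst = 1 };

    // Old PropertyType cases map onto (kind, location) pairs:
    //   DATA              -> (kData,     kField)       in-object/backing-store field
    //   DATA_CONSTANT     -> (kData,     kDescriptor)  constant in the descriptor array
    //   ACCESSOR          -> (kAccessor, kField)       still unsupported (TODO above)
    //   ACCESSOR_CONSTANT -> (kAccessor, kDescriptor)  accessor pair in the descriptor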
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index c473b9b..82039c8 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -32,6 +32,7 @@
       case kFlags_branch:
       case kFlags_deoptimize:
       case kFlags_set:
+      case kFlags_trap:
         return SetCC;
       case kFlags_none:
         return LeaveCC;
@@ -473,7 +474,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ cmp(scratch1,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(ne, &done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -736,10 +738,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1504,6 +1504,438 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmFloat32x4Splat: {
+      __ vdup(i.OutputSimd128Register(), i.InputFloatRegister(0));
+      break;
+    }
+    case kArmFloat32x4ExtractLane: {
+      __ ExtractLane(i.OutputFloatRegister(), i.InputSimd128Register(0),
+                     kScratchReg, i.InputInt8(1));
+      break;
+    }
+    case kArmFloat32x4ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputFloatRegister(2), kScratchReg, i.InputInt8(1));
+      break;
+    }
+    case kArmFloat32x4FromInt32x4: {
+      __ vcvt_f32_s32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4FromUint32x4: {
+      __ vcvt_f32_u32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Abs: {
+      __ vabs(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Neg: {
+      __ vneg(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmFloat32x4Add: {
+      __ vadd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4Sub: {
+      __ vsub(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4Equal: {
+      __ vceq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmFloat32x4NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt32x4Splat: {
+      __ vdup(Neon32, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt32x4ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS32,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt32x4ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS32, i.InputInt8(1));
+      break;
+    }
+    case kArmInt32x4FromFloat32x4: {
+      __ vcvt_s32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmUint32x4FromFloat32x4: {
+      __ vcvt_u32_f32(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt32x4Neg: {
+      __ vneg(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt32x4ShiftLeftByScalar: {
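+      // The shift amount arrives as an immediate sized to the lane width:
+      // InputInt5 here, InputInt4/InputInt3 for the narrower lanes below.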
+      __ vshl(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmInt32x4ShiftRightByScalar: {
+      __ vshr(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmInt32x4Add: {
+      __ vadd(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Sub: {
+      __ vsub(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Mul: {
+      __ vmul(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Min: {
+      __ vmin(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Max: {
+      __ vmax(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4Equal: {
+      __ vceq(Neon32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt32x4GreaterThan: {
+      __ vcgt(NeonS32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt32x4GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4ShiftRightByScalar: {
+      __ vshr(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt5(1));
+      break;
+    }
+    case kArmUint32x4Min: {
+      __ vmin(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4Max: {
+      __ vmax(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4GreaterThan: {
+      __ vcgt(NeonU32, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint32x4GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU32, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Splat: {
+      __ vdup(Neon16, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt16x8ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS16,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt16x8ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS16, i.InputInt8(1));
+      break;
+    }
+    case kArmInt16x8Neg: {
+      __ vneg(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt16x8ShiftLeftByScalar: {
+      __ vshl(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmInt16x8ShiftRightByScalar: {
+      __ vshr(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmInt16x8Add: {
+      __ vadd(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8AddSaturate: {
+      __ vqadd(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Sub: {
+      __ vsub(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8SubSaturate: {
+      __ vqsub(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Mul: {
+      __ vmul(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Min: {
+      __ vmin(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Max: {
+      __ vmax(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8Equal: {
+      __ vceq(Neon16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt16x8GreaterThan: {
+      __ vcgt(NeonS16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt16x8GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8ShiftRightByScalar: {
+      __ vshr(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt4(1));
+      break;
+    }
+    case kArmUint16x8AddSaturate: {
+      __ vqadd(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8SubSaturate: {
+      __ vqsub(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8Min: {
+      __ vmin(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8Max: {
+      __ vmax(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8GreaterThan: {
+      __ vcgt(NeonU16, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint16x8GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU16, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Splat: {
+      __ vdup(Neon8, i.OutputSimd128Register(), i.InputRegister(0));
+      break;
+    }
+    case kArmInt8x16ExtractLane: {
+      __ ExtractLane(i.OutputRegister(), i.InputSimd128Register(0), NeonS8,
+                     i.InputInt8(1));
+      break;
+    }
+    case kArmInt8x16ReplaceLane: {
+      __ ReplaceLane(i.OutputSimd128Register(), i.InputSimd128Register(0),
+                     i.InputRegister(2), NeonS8, i.InputInt8(1));
+      break;
+    }
+    case kArmInt8x16Neg: {
+      __ vneg(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmInt8x16ShiftLeftByScalar: {
+      __ vshl(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmInt8x16ShiftRightByScalar: {
+      __ vshr(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmInt8x16Add: {
+      __ vadd(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16AddSaturate: {
+      __ vqadd(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Sub: {
+      __ vsub(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16SubSaturate: {
+      __ vqsub(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Mul: {
+      __ vmul(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Min: {
+      __ vmin(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Max: {
+      __ vmax(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16Equal: {
+      __ vceq(Neon8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16NotEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vceq(Neon8, dst, i.InputSimd128Register(0), i.InputSimd128Register(1));
+      __ vmvn(dst, dst);
+      break;
+    }
+    case kArmInt8x16GreaterThan: {
+      __ vcgt(NeonS8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmInt8x16GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonS8, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16ShiftRightByScalar: {
+      __ vshr(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputInt3(1));
+      break;
+    }
+    case kArmUint8x16AddSaturate: {
+      __ vqadd(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16SubSaturate: {
+      __ vqsub(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+               i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16Min: {
+      __ vmin(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16Max: {
+      __ vmax(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16GreaterThan: {
+      __ vcgt(NeonU8, i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmUint8x16GreaterThanOrEqual: {
+      Simd128Register dst = i.OutputSimd128Register();
+      __ vcge(NeonU8, dst, i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128And: {
+      __ vand(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Or: {
+      __ vorr(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Xor: {
+      __ veor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+              i.InputSimd128Register(1));
+      break;
+    }
+    case kArmSimd128Not: {
+      __ vmvn(i.OutputSimd128Register(), i.InputSimd128Register(0));
+      break;
+    }
+    case kArmSimd32x4Select:
+    case kArmSimd16x8Select:
+    case kArmSimd8x16Select: {
+      // vbsl clobbers the mask input, so make sure it was DefineSameAsFirst.
+      DCHECK(i.OutputSimd128Register().is(i.InputSimd128Register(0)));
+      __ vbsl(i.OutputSimd128Register(), i.InputSimd128Register(1),
+              i.InputSimd128Register(2));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
       break;
@@ -1590,6 +2022,69 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      ArmOperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
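+      // If the function elided its frame, set up a WASM frame for the
+      // out-of-line trap code.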
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Condition cc = FlagsConditionToCondition(condition);
+  __ b(cc, tlabel);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1633,16 +2128,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   __ CheckConstPool(false, false);
@@ -1824,9 +2322,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ mov(dst, Operand(src.ToInt32()));
@@ -1891,8 +2387,7 @@
         DCHECK(destination->IsDoubleStackSlot());
         __ vstr(src, g.ToMemOperand(destination));
       }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+    } else if (rep == MachineRepresentation::kFloat32) {
       // GapResolver may give us reg codes that don't map to actual s-registers.
       // Generate code to work around those cases.
       int src_code = LocationOperand::cast(source)->register_code();
@@ -1903,6 +2398,19 @@
         DCHECK(destination->IsFloatStackSlot());
         __ VmovExtended(g.ToMemOperand(destination), src_code, kScratchReg);
       }
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      QwNeonRegister src = g.ToSimd128Register(source);
+      if (destination->IsSimd128Register()) {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ Move(dst, src);
+      } else {
+        DCHECK(destination->IsSimd128StackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
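+        // vst1 addresses memory only through a register, so materialize
+        // base + offset in the scratch register first.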
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+      }
     }
   } else if (source->IsFPStackSlot()) {
     MemOperand src = g.ToMemOperand(source);
@@ -1911,24 +2419,38 @@
     if (destination->IsFPRegister()) {
       if (rep == MachineRepresentation::kFloat64) {
         __ vldr(g.ToDoubleRegister(destination), src);
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      } else if (rep == MachineRepresentation::kFloat32) {
         // GapResolver may give us reg codes that don't map to actual
         // s-registers. Generate code to work around those cases.
         int dst_code = LocationOperand::cast(destination)->register_code();
         __ VmovExtended(dst_code, src, kScratchReg);
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(dst.low(), 2),
+                NeonMemOperand(kScratchReg));
       }
     } else {
       DCHECK(destination->IsFPStackSlot());
       if (rep == MachineRepresentation::kFloat64) {
         DwVfpRegister temp = kScratchDoubleReg;
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
-      } else {
-        DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+      } else if (rep == MachineRepresentation::kFloat32) {
         SwVfpRegister temp = kScratchDoubleReg.low();
         __ vldr(temp, src);
         __ vstr(temp, g.ToMemOperand(destination));
+      } else {
+        DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+        MemOperand dst = g.ToMemOperand(destination);
+        __ add(kScratchReg, src.rn(), Operand(src.offset()));
+        __ vld1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
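+        // Restore the 0 register; it overlaps the scratch quad register
+        // used for the copy above.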
+        __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
       }
     }
   } else {
@@ -1936,7 +2458,6 @@
   }
 }
 
-
 void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                  InstructionOperand* destination) {
   ArmOperandConverter g(this, nullptr);
@@ -1975,7 +2496,7 @@
       DwVfpRegister src = g.ToDoubleRegister(source);
       if (destination->IsFPRegister()) {
         DwVfpRegister dst = g.ToDoubleRegister(destination);
-        __ vswp(src, dst);
+        __ Swap(src, dst);
       } else {
         DCHECK(destination->IsFPStackSlot());
         MemOperand dst = g.ToMemOperand(destination);
@@ -1983,8 +2504,7 @@
         __ vldr(src, dst);
         __ vstr(temp, dst);
       }
-    } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
+    } else if (rep == MachineRepresentation::kFloat32) {
       int src_code = LocationOperand::cast(source)->register_code();
       if (destination->IsFPRegister()) {
         int dst_code = LocationOperand::cast(destination)->register_code();
@@ -1998,29 +2518,55 @@
         __ VmovExtended(src_code, dst, kScratchReg);
         __ vstr(temp.low(), dst);
       }
+    } else {
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      QwNeonRegister src = g.ToSimd128Register(source);
+      if (destination->IsFPRegister()) {
+        QwNeonRegister dst = g.ToSimd128Register(destination);
+        __ Swap(src, dst);
+      } else {
+        DCHECK(destination->IsFPStackSlot());
+        MemOperand dst = g.ToMemOperand(destination);
+        __ Move(kScratchQuadReg, src);
+        __ add(kScratchReg, dst.rn(), Operand(dst.offset()));
+        __ vld1(Neon8, NeonListOperand(src.low(), 2),
+                NeonMemOperand(kScratchReg));
+        __ vst1(Neon8, NeonListOperand(kScratchQuadReg.low(), 2),
+                NeonMemOperand(kScratchReg));
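+        // Restore the 0 register; it was clobbered by the move through
+        // kScratchQuadReg.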
+        __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+      }
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
-    Register temp_0 = kScratchReg;
-    LowDwVfpRegister temp_1 = kScratchDoubleReg;
-    MemOperand src0 = g.ToMemOperand(source);
-    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
     MachineRepresentation rep = LocationOperand::cast(source)->representation();
     if (rep == MachineRepresentation::kFloat64) {
-      MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
-      MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
-      __ vldr(temp_1, dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ ldr(temp_0, src1);
-      __ str(temp_0, dst1);
-      __ vstr(temp_1, src0);
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
+      // Restore the 0 register.
+      __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
+    } else if (rep == MachineRepresentation::kFloat32) {
+      __ vldr(kScratchDoubleReg.low(), dst);
+      __ vldr(kScratchDoubleReg.high(), src);
+      __ vstr(kScratchDoubleReg.low(), src);
+      __ vstr(kScratchDoubleReg.high(), dst);
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, rep);
-      __ vldr(temp_1.low(), dst0);  // Save destination in temp_1.
-      __ ldr(temp_0, src0);  // Then use temp_0 to copy source to destination.
-      __ str(temp_0, dst0);
-      __ vstr(temp_1.low(), src0);
+      DCHECK_EQ(MachineRepresentation::kSimd128, rep);
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
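+      // Advance both slots to the upper doubleword and swap that half too.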
+      src.set_offset(src.offset() + kDoubleSize);
+      dst.set_offset(dst.offset() + kDoubleSize);
+      __ vldr(kScratchDoubleReg, dst);
+      __ vldr(kDoubleRegZero, src);
+      __ vstr(kScratchDoubleReg, src);
+      __ vstr(kDoubleRegZero, dst);
+      // Restore the 0 register.
+      __ veor(kDoubleRegZero, kDoubleRegZero, kDoubleRegZero);
     }
   } else {
     // No other combinations are possible.
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index 07c4033..0c19deb 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -119,7 +119,95 @@
   V(ArmLdr)                        \
   V(ArmStr)                        \
   V(ArmPush)                       \
-  V(ArmPoke)
+  V(ArmPoke)                       \
+  V(ArmFloat32x4Splat)             \
+  V(ArmFloat32x4ExtractLane)       \
+  V(ArmFloat32x4ReplaceLane)       \
+  V(ArmFloat32x4FromInt32x4)       \
+  V(ArmFloat32x4FromUint32x4)      \
+  V(ArmFloat32x4Abs)               \
+  V(ArmFloat32x4Neg)               \
+  V(ArmFloat32x4Add)               \
+  V(ArmFloat32x4Sub)               \
+  V(ArmFloat32x4Equal)             \
+  V(ArmFloat32x4NotEqual)          \
+  V(ArmInt32x4Splat)               \
+  V(ArmInt32x4ExtractLane)         \
+  V(ArmInt32x4ReplaceLane)         \
+  V(ArmInt32x4FromFloat32x4)       \
+  V(ArmUint32x4FromFloat32x4)      \
+  V(ArmInt32x4Neg)                 \
+  V(ArmInt32x4ShiftLeftByScalar)   \
+  V(ArmInt32x4ShiftRightByScalar)  \
+  V(ArmInt32x4Add)                 \
+  V(ArmInt32x4Sub)                 \
+  V(ArmInt32x4Mul)                 \
+  V(ArmInt32x4Min)                 \
+  V(ArmInt32x4Max)                 \
+  V(ArmInt32x4Equal)               \
+  V(ArmInt32x4NotEqual)            \
+  V(ArmInt32x4GreaterThan)         \
+  V(ArmInt32x4GreaterThanOrEqual)  \
+  V(ArmUint32x4ShiftRightByScalar) \
+  V(ArmUint32x4Min)                \
+  V(ArmUint32x4Max)                \
+  V(ArmUint32x4GreaterThan)        \
+  V(ArmUint32x4GreaterThanOrEqual) \
+  V(ArmInt16x8Splat)               \
+  V(ArmInt16x8ExtractLane)         \
+  V(ArmInt16x8ReplaceLane)         \
+  V(ArmInt16x8Neg)                 \
+  V(ArmInt16x8ShiftLeftByScalar)   \
+  V(ArmInt16x8ShiftRightByScalar)  \
+  V(ArmInt16x8Add)                 \
+  V(ArmInt16x8AddSaturate)         \
+  V(ArmInt16x8Sub)                 \
+  V(ArmInt16x8SubSaturate)         \
+  V(ArmInt16x8Mul)                 \
+  V(ArmInt16x8Min)                 \
+  V(ArmInt16x8Max)                 \
+  V(ArmInt16x8Equal)               \
+  V(ArmInt16x8NotEqual)            \
+  V(ArmInt16x8GreaterThan)         \
+  V(ArmInt16x8GreaterThanOrEqual)  \
+  V(ArmUint16x8ShiftRightByScalar) \
+  V(ArmUint16x8AddSaturate)        \
+  V(ArmUint16x8SubSaturate)        \
+  V(ArmUint16x8Min)                \
+  V(ArmUint16x8Max)                \
+  V(ArmUint16x8GreaterThan)        \
+  V(ArmUint16x8GreaterThanOrEqual) \
+  V(ArmInt8x16Splat)               \
+  V(ArmInt8x16ExtractLane)         \
+  V(ArmInt8x16ReplaceLane)         \
+  V(ArmInt8x16Neg)                 \
+  V(ArmInt8x16ShiftLeftByScalar)   \
+  V(ArmInt8x16ShiftRightByScalar)  \
+  V(ArmInt8x16Add)                 \
+  V(ArmInt8x16AddSaturate)         \
+  V(ArmInt8x16Sub)                 \
+  V(ArmInt8x16SubSaturate)         \
+  V(ArmInt8x16Mul)                 \
+  V(ArmInt8x16Min)                 \
+  V(ArmInt8x16Max)                 \
+  V(ArmInt8x16Equal)               \
+  V(ArmInt8x16NotEqual)            \
+  V(ArmInt8x16GreaterThan)         \
+  V(ArmInt8x16GreaterThanOrEqual)  \
+  V(ArmUint8x16ShiftRightByScalar) \
+  V(ArmUint8x16AddSaturate)        \
+  V(ArmUint8x16SubSaturate)        \
+  V(ArmUint8x16Min)                \
+  V(ArmUint8x16Max)                \
+  V(ArmUint8x16GreaterThan)        \
+  V(ArmUint8x16GreaterThanOrEqual) \
+  V(ArmSimd128And)                 \
+  V(ArmSimd128Or)                  \
+  V(ArmSimd128Xor)                 \
+  V(ArmSimd128Not)                 \
+  V(ArmSimd32x4Select)             \
+  V(ArmSimd16x8Select)             \
+  V(ArmSimd8x16Select)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
diff --git a/src/compiler/arm/instruction-scheduler-arm.cc b/src/compiler/arm/instruction-scheduler-arm.cc
index 3f38e5d..ba2f219 100644
--- a/src/compiler/arm/instruction-scheduler-arm.cc
+++ b/src/compiler/arm/instruction-scheduler-arm.cc
@@ -108,6 +108,94 @@
     case kArmFloat32Min:
     case kArmFloat64Min:
     case kArmFloat64SilenceNaN:
+    case kArmFloat32x4Splat:
+    case kArmFloat32x4ExtractLane:
+    case kArmFloat32x4ReplaceLane:
+    case kArmFloat32x4FromInt32x4:
+    case kArmFloat32x4FromUint32x4:
+    case kArmFloat32x4Abs:
+    case kArmFloat32x4Neg:
+    case kArmFloat32x4Add:
+    case kArmFloat32x4Sub:
+    case kArmFloat32x4Equal:
+    case kArmFloat32x4NotEqual:
+    case kArmInt32x4Splat:
+    case kArmInt32x4ExtractLane:
+    case kArmInt32x4ReplaceLane:
+    case kArmInt32x4FromFloat32x4:
+    case kArmUint32x4FromFloat32x4:
+    case kArmInt32x4Neg:
+    case kArmInt32x4ShiftLeftByScalar:
+    case kArmInt32x4ShiftRightByScalar:
+    case kArmInt32x4Add:
+    case kArmInt32x4Sub:
+    case kArmInt32x4Mul:
+    case kArmInt32x4Min:
+    case kArmInt32x4Max:
+    case kArmInt32x4Equal:
+    case kArmInt32x4NotEqual:
+    case kArmInt32x4GreaterThan:
+    case kArmInt32x4GreaterThanOrEqual:
+    case kArmUint32x4ShiftRightByScalar:
+    case kArmUint32x4Min:
+    case kArmUint32x4Max:
+    case kArmUint32x4GreaterThan:
+    case kArmUint32x4GreaterThanOrEqual:
+    case kArmInt16x8Splat:
+    case kArmInt16x8ExtractLane:
+    case kArmInt16x8ReplaceLane:
+    case kArmInt16x8Neg:
+    case kArmInt16x8ShiftLeftByScalar:
+    case kArmInt16x8ShiftRightByScalar:
+    case kArmInt16x8Add:
+    case kArmInt16x8AddSaturate:
+    case kArmInt16x8Sub:
+    case kArmInt16x8SubSaturate:
+    case kArmInt16x8Mul:
+    case kArmInt16x8Min:
+    case kArmInt16x8Max:
+    case kArmInt16x8Equal:
+    case kArmInt16x8NotEqual:
+    case kArmInt16x8GreaterThan:
+    case kArmInt16x8GreaterThanOrEqual:
+    case kArmUint16x8ShiftRightByScalar:
+    case kArmUint16x8AddSaturate:
+    case kArmUint16x8SubSaturate:
+    case kArmUint16x8Min:
+    case kArmUint16x8Max:
+    case kArmUint16x8GreaterThan:
+    case kArmUint16x8GreaterThanOrEqual:
+    case kArmInt8x16Splat:
+    case kArmInt8x16ExtractLane:
+    case kArmInt8x16ReplaceLane:
+    case kArmInt8x16Neg:
+    case kArmInt8x16ShiftLeftByScalar:
+    case kArmInt8x16ShiftRightByScalar:
+    case kArmInt8x16Add:
+    case kArmInt8x16AddSaturate:
+    case kArmInt8x16Sub:
+    case kArmInt8x16SubSaturate:
+    case kArmInt8x16Mul:
+    case kArmInt8x16Min:
+    case kArmInt8x16Max:
+    case kArmInt8x16Equal:
+    case kArmInt8x16NotEqual:
+    case kArmInt8x16GreaterThan:
+    case kArmInt8x16GreaterThanOrEqual:
+    case kArmUint8x16ShiftRightByScalar:
+    case kArmUint8x16AddSaturate:
+    case kArmUint8x16SubSaturate:
+    case kArmUint8x16Min:
+    case kArmUint8x16Max:
+    case kArmUint8x16GreaterThan:
+    case kArmUint8x16GreaterThanOrEqual:
+    case kArmSimd128And:
+    case kArmSimd128Or:
+    case kArmSimd128Xor:
+    case kArmSimd128Not:
+    case kArmSimd32x4Select:
+    case kArmSimd16x8Select:
+    case kArmSimd8x16Select:
       return kNoOpcodeFlags;
 
     case kArmVldrF32:
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index 5279d1e..0cffff7 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -84,7 +84,6 @@
                  g.UseRegister(node->InputAt(0)));
 }
 
-
 void VisitRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   ArmOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -92,6 +91,29 @@
                  g.UseRegister(node->InputAt(1)));
 }
 
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
+  // Use DefineSameAsFirst for ternary ops that clobber their first input,
+  // e.g. the NEON vbsl instruction.
+  selector->Emit(
+      opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+      g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
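+  // The operator's int32 parameter carries the immediate: a lane index or a
+  // shift amount.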
+  int32_t imm = OpParameter<int32_t>(node);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+  ArmOperandGenerator g(selector);
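+  // Used for ReplaceLane: input 0 is the vector, the immediate selects the
+  // lane, and input 1 supplies the replacement value.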
+  int32_t imm = OpParameter<int32_t>(node);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+                 g.UseRegister(node->InputAt(1)));
+}
 
 template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
           AddressingMode kImmMode, AddressingMode kRegMode>
@@ -266,7 +288,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
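+    // Append the trap id as an extra immediate input; AssembleArchTrap
+    // reads it back from the instruction's last input.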
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -403,6 +428,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -488,6 +516,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -501,6 +532,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 void InstructionSelector::VisitUnalignedLoad(Node* node) {
   UnalignedLoadRepresentation load_rep =
       UnalignedLoadRepresentationOf(node->op());
@@ -646,6 +682,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -690,6 +729,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -884,7 +926,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1079,15 +1124,8 @@
   VisitShift(this, node, TryMatchROR);
 }
 
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  VisitRR(this, kArmClz, node);
-}
-
-
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord32ReverseBits(Node* node) {
   DCHECK(IsSupported(ARMv7));
   VisitRR(this, kArmRbit, node);
@@ -1250,12 +1288,16 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     InstructionOperand in[] = {temp_operand, result_operand, shift_31};
-    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, 0, nullptr, 3, in, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), temp_operand,
                    result_operand, shift_31);
+  } else {
+    DCHECK(cont->IsTrap());
+    InstructionOperand in[] = {temp_operand, result_operand, shift_31,
+                               g.UseImmediate(cont->trap_id())};
+    selector->Emit(opcode, 0, nullptr, 4, in);
   }
 }
 
@@ -1284,12 +1326,6 @@
   VisitRRR(this, kArmMul, node);
 }
 
-
-void InstructionSelector::VisitInt32MulHigh(Node* node) {
-  VisitRRR(this, kArmSmmul, node);
-}
-
-
 void InstructionSelector::VisitUint32MulHigh(Node* node) {
   ArmOperandGenerator g(this);
   InstructionOperand outputs[] = {g.TempRegister(), g.DefineAsRegister(node)};
@@ -1318,73 +1354,76 @@
   VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
 }
 
+#define RR_OP_LIST(V)                                \
+  V(Word32Clz, kArmClz)                              \
+  V(ChangeFloat32ToFloat64, kArmVcvtF64F32)          \
+  V(RoundInt32ToFloat32, kArmVcvtF32S32)             \
+  V(RoundUint32ToFloat32, kArmVcvtF32U32)            \
+  V(ChangeInt32ToFloat64, kArmVcvtF64S32)            \
+  V(ChangeUint32ToFloat64, kArmVcvtF64U32)           \
+  V(TruncateFloat32ToInt32, kArmVcvtS32F32)          \
+  V(TruncateFloat32ToUint32, kArmVcvtU32F32)         \
+  V(ChangeFloat64ToInt32, kArmVcvtS32F64)            \
+  V(ChangeFloat64ToUint32, kArmVcvtU32F64)           \
+  V(TruncateFloat64ToUint32, kArmVcvtU32F64)         \
+  V(TruncateFloat64ToFloat32, kArmVcvtF32F64)        \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI) \
+  V(RoundFloat64ToInt32, kArmVcvtS32F64)             \
+  V(BitcastFloat32ToInt32, kArmVmovU32F32)           \
+  V(BitcastInt32ToFloat32, kArmVmovF32U32)           \
+  V(Float64ExtractLowWord32, kArmVmovLowU32F64)      \
+  V(Float64ExtractHighWord32, kArmVmovHighU32F64)    \
+  V(Float64SilenceNaN, kArmFloat64SilenceNaN)        \
+  V(Float32Abs, kArmVabsF32)                         \
+  V(Float64Abs, kArmVabsF64)                         \
+  V(Float32Neg, kArmVnegF32)                         \
+  V(Float64Neg, kArmVnegF64)                         \
+  V(Float32Sqrt, kArmVsqrtF32)                       \
+  V(Float64Sqrt, kArmVsqrtF64)
 
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64F32, node);
-}
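+// Ops that lower to ARMv8-only rounding instructions; their visitor asserts
+// ARMv8 support.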
+#define RR_OP_LIST_V8(V)                 \
+  V(Float32RoundDown, kArmVrintmF32)     \
+  V(Float64RoundDown, kArmVrintmF64)     \
+  V(Float32RoundUp, kArmVrintpF32)       \
+  V(Float64RoundUp, kArmVrintpF64)       \
+  V(Float32RoundTruncate, kArmVrintzF32) \
+  V(Float64RoundTruncate, kArmVrintzF64) \
+  V(Float64RoundTiesAway, kArmVrintaF64) \
+  V(Float32RoundTiesEven, kArmVrintnF32) \
+  V(Float64RoundTiesEven, kArmVrintnF64)
 
+#define RRR_OP_LIST(V)          \
+  V(Int32MulHigh, kArmSmmul)    \
+  V(Float32Mul, kArmVmulF32)    \
+  V(Float64Mul, kArmVmulF64)    \
+  V(Float32Div, kArmVdivF32)    \
+  V(Float64Div, kArmVdivF64)    \
+  V(Float32Max, kArmFloat32Max) \
+  V(Float64Max, kArmFloat64Max) \
+  V(Float32Min, kArmFloat32Min) \
+  V(Float64Min, kArmFloat64Min)
 
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32S32, node);
-}
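+// Generate one Visit method per list entry; each simply forwards to VisitRR.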
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
+#define RR_VISITOR_V8(Name, opcode)                   \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    DCHECK(CpuFeatures::IsSupported(ARMv8));          \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST_V8(RR_VISITOR_V8)
+#undef RR_VISITOR_V8
 
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32U32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64S32, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRR(this, kArmVcvtF64U32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F64, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRR(this, kArmVcvtU32F64, node);
-}
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRR(this, kArmVcvtF32F64, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRR(this, kArmVcvtS32F64, node);
-}
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  VisitRR(this, kArmVmovU32F32, node);
-}
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  VisitRR(this, kArmVmovF32U32, node);
-}
+#define RRR_VISITOR(Name, opcode)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, opcode, node);                     \
+  }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   ArmOperandGenerator g(this);
@@ -1453,132 +1492,12 @@
   VisitRRR(this, kArmVsubF64, node);
 }
 
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kArmVmulF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRRR(this, kArmVmulF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kArmVdivF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRRR(this, kArmVdivF64, node);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   ArmOperandGenerator g(this);
   Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
-void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitRRR(this, kArmFloat32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRRR(this, kArmFloat64Max, node);
-}
-
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
-  VisitRR(this, kArmFloat64SilenceNaN, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitRRR(this, kArmFloat32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRRR(this, kArmFloat64Min, node);
-}
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kArmVabsF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  VisitRR(this, kArmVabsF64, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kArmVsqrtF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRR(this, kArmVsqrtF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintmF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintmF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintpF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintpF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintzF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintzF64, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintaF64, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintnF32, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  DCHECK(CpuFeatures::IsSupported(ARMv8));
-  VisitRR(this, kArmVrintnF64, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitRR(this, kArmVnegF32, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitRR(this, kArmVnegF64, node);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   ArmOperandGenerator g(this);
@@ -1641,11 +1560,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1835,7 +1757,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1991,11 +1916,14 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand, value_operand,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    value_operand);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), value_operand, value_operand,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2008,14 +1936,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2151,17 +2094,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  VisitRR(this, kArmVmovLowU32F64, node);
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  VisitRR(this, kArmVmovHighU32F64, node);
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   ArmOperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2249,6 +2181,145 @@
   Emit(code, 0, nullptr, input_count, inputs);
 }
 
+#define SIMD_TYPE_LIST(V) \
+  V(Float32x4)            \
+  V(Int32x4)              \
+  V(Int16x8)              \
+  V(Int8x16)
+
+#define SIMD_FORMAT_LIST(V) \
+  V(32x4)                   \
+  V(16x8)                   \
+  V(8x16)
+
+#define SIMD_UNOP_LIST(V)  \
+  V(Float32x4FromInt32x4)  \
+  V(Float32x4FromUint32x4) \
+  V(Float32x4Abs)          \
+  V(Float32x4Neg)          \
+  V(Int32x4FromFloat32x4)  \
+  V(Uint32x4FromFloat32x4) \
+  V(Int32x4Neg)            \
+  V(Int16x8Neg)            \
+  V(Int8x16Neg)            \
+  V(Simd128Not)
+
+#define SIMD_BINOP_LIST(V)      \
+  V(Float32x4Add)               \
+  V(Float32x4Sub)               \
+  V(Float32x4Equal)             \
+  V(Float32x4NotEqual)          \
+  V(Int32x4Add)                 \
+  V(Int32x4Sub)                 \
+  V(Int32x4Mul)                 \
+  V(Int32x4Min)                 \
+  V(Int32x4Max)                 \
+  V(Int32x4Equal)               \
+  V(Int32x4NotEqual)            \
+  V(Int32x4GreaterThan)         \
+  V(Int32x4GreaterThanOrEqual)  \
+  V(Uint32x4Min)                \
+  V(Uint32x4Max)                \
+  V(Uint32x4GreaterThan)        \
+  V(Uint32x4GreaterThanOrEqual) \
+  V(Int16x8Add)                 \
+  V(Int16x8AddSaturate)         \
+  V(Int16x8Sub)                 \
+  V(Int16x8SubSaturate)         \
+  V(Int16x8Mul)                 \
+  V(Int16x8Min)                 \
+  V(Int16x8Max)                 \
+  V(Int16x8Equal)               \
+  V(Int16x8NotEqual)            \
+  V(Int16x8GreaterThan)         \
+  V(Int16x8GreaterThanOrEqual)  \
+  V(Uint16x8AddSaturate)        \
+  V(Uint16x8SubSaturate)        \
+  V(Uint16x8Min)                \
+  V(Uint16x8Max)                \
+  V(Uint16x8GreaterThan)        \
+  V(Uint16x8GreaterThanOrEqual) \
+  V(Int8x16Add)                 \
+  V(Int8x16AddSaturate)         \
+  V(Int8x16Sub)                 \
+  V(Int8x16SubSaturate)         \
+  V(Int8x16Mul)                 \
+  V(Int8x16Min)                 \
+  V(Int8x16Max)                 \
+  V(Int8x16Equal)               \
+  V(Int8x16NotEqual)            \
+  V(Int8x16GreaterThan)         \
+  V(Int8x16GreaterThanOrEqual)  \
+  V(Uint8x16AddSaturate)        \
+  V(Uint8x16SubSaturate)        \
+  V(Uint8x16Min)                \
+  V(Uint8x16Max)                \
+  V(Uint8x16GreaterThan)        \
+  V(Uint8x16GreaterThanOrEqual) \
+  V(Simd128And)                 \
+  V(Simd128Or)                  \
+  V(Simd128Xor)
+
+#define SIMD_SHIFT_OP_LIST(V)   \
+  V(Int32x4ShiftLeftByScalar)   \
+  V(Int32x4ShiftRightByScalar)  \
+  V(Uint32x4ShiftRightByScalar) \
+  V(Int16x8ShiftLeftByScalar)   \
+  V(Int16x8ShiftRightByScalar)  \
+  V(Uint16x8ShiftRightByScalar) \
+  V(Int8x16ShiftLeftByScalar)   \
+  V(Int8x16ShiftRightByScalar)  \
+  V(Uint8x16ShiftRightByScalar)
+
+#define SIMD_VISIT_SPLAT(Type)                              \
+  void InstructionSelector::VisitCreate##Type(Node* node) { \
+    VisitRR(this, kArm##Type##Splat, node);                 \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ExtractLane(Node* node) { \
+    VisitRRI(this, kArm##Type##ExtractLane, node);                 \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_EXTRACT_LANE)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type)                              \
+  void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+    VisitRRIR(this, kArm##Type##ReplaceLane, node);                \
+  }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name)                         \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, kArm##Name, node);                  \
+  }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_BINOP(Name)                        \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, kArm##Name, node);                 \
+  }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+#define SIMD_VISIT_SHIFT_OP(Name)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRI(this, kArm##Name, node);                 \
+  }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_SELECT_OP(format)                                \
+  void InstructionSelector::VisitSimd##format##Select(Node* node) { \
+    VisitRRRR(this, kArmSimd##format##Select, node);                \
+  }
+SIMD_FORMAT_LIST(SIMD_VISIT_SELECT_OP)
+#undef SIMD_VISIT_SELECT_OP
+
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index 8b1cb57..1cdedb0 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -209,17 +209,16 @@
     Constant constant = ToConstant(operand);
     switch (constant.type()) {
       case Constant::kInt32:
-        if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+        if (RelocInfo::IsWasmSizeReference(constant.rmode())) {
           return Operand(constant.ToInt32(), constant.rmode());
         } else {
           return Operand(constant.ToInt32());
         }
       case Constant::kInt64:
-        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-            constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+        if (RelocInfo::IsWasmPtrReference(constant.rmode())) {
           return Operand(constant.ToInt64(), constant.rmode());
         } else {
-          DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+          DCHECK(!RelocInfo::IsWasmSizeReference(constant.rmode()));
           return Operand(constant.ToInt64());
         }
       case Constant::kFloat32:
@@ -571,7 +570,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ Cmp(scratch1,
+         Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ B(ne, &done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -775,10 +775,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1702,6 +1700,67 @@
   if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
 }
 
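+// Emits a conditional branch to an out-of-line trap stub; the stub either calls the
+// matching trap builtin or, under cctest, a C trap callback.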
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+    void Generate() final {
+      Arm64OperandConverter i(gen_, instr_);
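+      // The trap ID was attached by the instruction selector as the last immediate input.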
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        DCHECK(csp.Is(__ StackPointer()));
+        // Initialize the jssp because it is required for the runtime call.
+        __ Mov(jssp, csp);
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          // The trap code should never return.
+          __ Brk(0);
+        }
+      }
+    }
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
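+  // If the surrounding code elided its frame, the trap path builds a WASM_COMPILED frame first.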
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Condition cc = FlagsConditionToCondition(condition);
+  __ B(cc, tlabel);
+}
 
 // Assemble boolean materializations after this instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1749,13 +1808,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
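+  // Only soft deopts use the SOFT bailout table; all other kinds bail out eagerly.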
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -1828,7 +1890,6 @@
       osr_pc_offset_ = __ pc_offset();
       shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
     }
-
     // Build remainder of frame, including accounting for and filling-in
     // frame-specific header information, e.g. claiming the extra slot that
     // other platforms explicitly push for STUB frames and frames recording
@@ -1843,7 +1904,7 @@
     if (is_stub_frame) {
       UseScratchRegisterScope temps(masm());
       Register temp = temps.AcquireX();
-      __ Mov(temp, Smi::FromInt(info()->GetOutputStackFrameType()));
+      __ Mov(temp, StackFrame::TypeToMarker(info()->GetOutputStackFrameType()));
       __ Str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
     }
   }
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 0eef53c..bacf792 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -123,7 +123,7 @@
 
   bool CanBeLoadStoreShiftImmediate(Node* node, MachineRepresentation rep) {
     // TODO(arm64): Load and Store on 128 bit Q registers is not supported yet.
-    DCHECK_NE(MachineRepresentation::kSimd128, rep);
+    DCHECK_GT(MachineRepresentation::kSimd128, rep);
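+    // Note: DCHECK_GT also rules out the kSimd1x* representations that follow kSimd128.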
     return IsIntegerConstant(node) &&
            (GetIntegerConstantValue(node) == ElementSizeLog2Of(rep));
   }
@@ -436,14 +436,18 @@
     Matcher m_shift(right_node);
     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
-    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+    // We only need at most the last 6 bits of the shift.
+    inputs[input_count++] =
+        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
   } else if (can_commute && TryMatchAnyShift(selector, node, left_node, &opcode,
                                              !is_add_sub)) {
     if (must_commute_cond) cont->Commute();
     Matcher m_shift(left_node);
     inputs[input_count++] = g.UseRegisterOrImmediateZero(right_node);
     inputs[input_count++] = g.UseRegister(m_shift.left().node());
-    inputs[input_count++] = g.UseImmediate(m_shift.right().node());
+    // We only need at most the last 6 bits of the shift.
+    inputs[input_count++] =
+        g.UseImmediate(static_cast<int>(m_shift.right().Value() & 0x3F));
   } else {
     inputs[input_count++] = g.UseRegisterOrImmediateZero(left_node);
     inputs[input_count++] = g.UseRegister(right_node);
@@ -470,7 +474,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -586,6 +593,9 @@
       immediate_mode = kLoadStoreImm64;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -682,6 +692,9 @@
         immediate_mode = kLoadStoreImm64;
         break;
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -708,6 +721,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
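+  // Protected stores back WebAssembly's trap-handler bounds checks; not yet supported here.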
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead.
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -745,6 +763,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -797,6 +818,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -934,7 +958,8 @@
     uint64_t mask = m.right().Value();
     uint64_t mask_width = base::bits::CountPopulation64(mask);
     uint64_t mask_msb = base::bits::CountLeadingZeros64(mask);
-    if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+    if ((mask_width != 0) && (mask_width != 64) &&
+        (mask_msb + mask_width == 64)) {
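+      // A width of 64 (an all-ones mask) is excluded: the And would be a no-op rather
+      // than a genuine bitfield extract.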
       // The mask must be contiguous, and occupy the least-significant bits.
       DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
 
@@ -1061,6 +1086,7 @@
     // OP is >>> or >> and (K & 0x1f) != 0.
     Int32BinopMatcher mleft(m.left().node());
     if (mleft.right().HasValue() && m.right().HasValue() &&
+        (mleft.right().Value() & 0x1f) != 0 &&
         (mleft.right().Value() & 0x1f) == (m.right().Value() & 0x1f)) {
       DCHECK(m.IsWord32Shr() || m.IsWord32Sar());
       ArchOpcode opcode = m.IsWord32Sar() ? kArm64Sbfx32 : kArm64Ubfx32;
@@ -1218,44 +1244,99 @@
   VisitRRO(this, kArm64Ror, node, kShift64Imm);
 }
 
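+// Operators that lower to a single ARM64 instruction with one register input and one output.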
+#define RR_OP_LIST(V)                                         \
+  V(Word64Clz, kArm64Clz)                                     \
+  V(Word32Clz, kArm64Clz32)                                   \
+  V(Word32ReverseBits, kArm64Rbit32)                          \
+  V(Word64ReverseBits, kArm64Rbit)                            \
+  V(ChangeFloat32ToFloat64, kArm64Float32ToFloat64)           \
+  V(RoundInt32ToFloat32, kArm64Int32ToFloat32)                \
+  V(RoundUint32ToFloat32, kArm64Uint32ToFloat32)              \
+  V(ChangeInt32ToFloat64, kArm64Int32ToFloat64)               \
+  V(ChangeUint32ToFloat64, kArm64Uint32ToFloat64)             \
+  V(TruncateFloat32ToInt32, kArm64Float32ToInt32)             \
+  V(ChangeFloat64ToInt32, kArm64Float64ToInt32)               \
+  V(TruncateFloat32ToUint32, kArm64Float32ToUint32)           \
+  V(ChangeFloat64ToUint32, kArm64Float64ToUint32)             \
+  V(TruncateFloat64ToUint32, kArm64Float64ToUint32)           \
+  V(TruncateFloat64ToFloat32, kArm64Float64ToFloat32)         \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)          \
+  V(RoundFloat64ToInt32, kArm64Float64ToInt32)                \
+  V(RoundInt64ToFloat32, kArm64Int64ToFloat32)                \
+  V(RoundInt64ToFloat64, kArm64Int64ToFloat64)                \
+  V(RoundUint64ToFloat32, kArm64Uint64ToFloat32)              \
+  V(RoundUint64ToFloat64, kArm64Uint64ToFloat64)              \
+  V(BitcastFloat32ToInt32, kArm64Float64ExtractLowWord32)     \
+  V(BitcastFloat64ToInt64, kArm64U64MoveFloat64)              \
+  V(BitcastInt32ToFloat32, kArm64Float64MoveU64)              \
+  V(BitcastInt64ToFloat64, kArm64Float64MoveU64)              \
+  V(Float32Abs, kArm64Float32Abs)                             \
+  V(Float64Abs, kArm64Float64Abs)                             \
+  V(Float32Sqrt, kArm64Float32Sqrt)                           \
+  V(Float64Sqrt, kArm64Float64Sqrt)                           \
+  V(Float32RoundDown, kArm64Float32RoundDown)                 \
+  V(Float64RoundDown, kArm64Float64RoundDown)                 \
+  V(Float32RoundUp, kArm64Float32RoundUp)                     \
+  V(Float64RoundUp, kArm64Float64RoundUp)                     \
+  V(Float32RoundTruncate, kArm64Float32RoundTruncate)         \
+  V(Float64RoundTruncate, kArm64Float64RoundTruncate)         \
+  V(Float64RoundTiesAway, kArm64Float64RoundTiesAway)         \
+  V(Float32RoundTiesEven, kArm64Float32RoundTiesEven)         \
+  V(Float64RoundTiesEven, kArm64Float64RoundTiesEven)         \
+  V(Float32Neg, kArm64Float32Neg)                             \
+  V(Float64Neg, kArm64Float64Neg)                             \
+  V(Float64ExtractLowWord32, kArm64Float64ExtractLowWord32)   \
+  V(Float64ExtractHighWord32, kArm64Float64ExtractHighWord32) \
+  V(Float64SilenceNaN, kArm64Float64SilenceNaN)
 
-void InstructionSelector::VisitWord64Clz(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Clz, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
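+// Operators that lower to a single ARM64 instruction with two register inputs.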
+#define RRR_OP_LIST(V)            \
+  V(Int32Div, kArm64Idiv32)       \
+  V(Int64Div, kArm64Idiv)         \
+  V(Uint32Div, kArm64Udiv32)      \
+  V(Uint64Div, kArm64Udiv)        \
+  V(Int32Mod, kArm64Imod32)       \
+  V(Int64Mod, kArm64Imod)         \
+  V(Uint32Mod, kArm64Umod32)      \
+  V(Uint64Mod, kArm64Umod)        \
+  V(Float32Add, kArm64Float32Add) \
+  V(Float64Add, kArm64Float64Add) \
+  V(Float32Sub, kArm64Float32Sub) \
+  V(Float64Sub, kArm64Float64Sub) \
+  V(Float32Mul, kArm64Float32Mul) \
+  V(Float64Mul, kArm64Float64Mul) \
+  V(Float32Div, kArm64Float32Div) \
+  V(Float64Div, kArm64Float64Div) \
+  V(Float32Max, kArm64Float32Max) \
+  V(Float64Max, kArm64Float64Max) \
+  V(Float32Min, kArm64Float32Min) \
+  V(Float64Min, kArm64Float64Min)
 
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, opcode, node);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Clz32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
-}
-
+#define RRR_VISITOR(Name, opcode)                     \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRRR(this, opcode, node);                     \
+  }
+RRR_OP_LIST(RRR_VISITOR)
+#undef RRR_VISITOR
 
 void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
 
-
-void InstructionSelector::VisitWord32ReverseBits(Node* node) {
-  VisitRR(this, kArm64Rbit32, node);
-}
-
-
-void InstructionSelector::VisitWord64ReverseBits(Node* node) {
-  VisitRR(this, kArm64Rbit, node);
-}
-
 void InstructionSelector::VisitWord64ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
 
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   Arm64OperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -1377,11 +1458,14 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     InstructionOperand in[] = {result, result};
-    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, 0, nullptr, 2, in, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), result, result);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), result, result,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1487,94 +1571,6 @@
 }
 
 
-void InstructionSelector::VisitInt32Div(Node* node) {
-  VisitRRR(this, kArm64Idiv32, node);
-}
-
-
-void InstructionSelector::VisitInt64Div(Node* node) {
-  VisitRRR(this, kArm64Idiv, node);
-}
-
-
-void InstructionSelector::VisitUint32Div(Node* node) {
-  VisitRRR(this, kArm64Udiv32, node);
-}
-
-
-void InstructionSelector::VisitUint64Div(Node* node) {
-  VisitRRR(this, kArm64Udiv, node);
-}
-
-
-void InstructionSelector::VisitInt32Mod(Node* node) {
-  VisitRRR(this, kArm64Imod32, node);
-}
-
-
-void InstructionSelector::VisitInt64Mod(Node* node) {
-  VisitRRR(this, kArm64Imod, node);
-}
-
-
-void InstructionSelector::VisitUint32Mod(Node* node) {
-  VisitRRR(this, kArm64Umod32, node);
-}
-
-
-void InstructionSelector::VisitUint64Mod(Node* node) {
-  VisitRRR(this, kArm64Umod, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Float32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Int32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Uint32ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Int32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRR(this, kArm64Uint32ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRR(this, kArm64Float32ToInt32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRR(this, kArm64Float32ToUint32, node);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRR(this, kArm64Float64ToUint32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRR(this, kArm64Float64ToUint32, node);
-}
-
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   Arm64OperandGenerator g(this);
 
@@ -1729,20 +1725,6 @@
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
 }
 
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Float64ToFloat32, node);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, kArchTruncateDoubleToI, node);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ToInt32, node);
-}
-
-
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1751,85 +1733,6 @@
   Emit(kArchNop, g.DefineSameAsFirst(node), g.UseRegister(value));
 }
 
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Int64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Int64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
-  VisitRR(this, kArm64Uint64ToFloat32, node);
-}
-
-
-void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Uint64ToFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  VisitRR(this, kArm64Float64ExtractLowWord32, node);
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
-  VisitRR(this, kArm64U64MoveFloat64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
-  VisitRR(this, kArm64Float64MoveU64, node);
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRRR(this, kArm64Float32Add, node);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
-  VisitRRR(this, kArm64Float64Add, node);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
-  VisitRRR(this, kArm64Float32Sub, node);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
-  VisitRRR(this, kArm64Float64Sub, node);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRRR(this, kArm64Float32Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRRR(this, kArm64Float64Mul, node);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRRR(this, kArm64Float32Div, node);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRRR(this, kArm64Float64Div, node);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   Arm64OperandGenerator g(this);
   Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
@@ -1837,94 +1740,6 @@
        g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
 }
 
-void InstructionSelector::VisitFloat32Max(Node* node) {
-  VisitRRR(this, kArm64Float32Max, node);
-}
-
-void InstructionSelector::VisitFloat64Max(Node* node) {
-  VisitRRR(this, kArm64Float64Max, node);
-}
-
-void InstructionSelector::VisitFloat32Min(Node* node) {
-  VisitRRR(this, kArm64Float32Min, node);
-}
-
-void InstructionSelector::VisitFloat64Min(Node* node) {
-  VisitRRR(this, kArm64Float64Min, node);
-}
-
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  VisitRR(this, kArm64Float32Abs, node);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  VisitRR(this, kArm64Float64Abs, node);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRR(this, kArm64Float32Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRR(this, kArm64Float64Sqrt, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, kArm64Float32RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, kArm64Float64RoundDown, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, kArm64Float32RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, kArm64Float64RoundUp, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, kArm64Float32RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, kArm64Float64RoundTruncate, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
-  VisitRR(this, kArm64Float64RoundTiesAway, node);
-}
-
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, kArm64Float32RoundTiesEven, node);
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, kArm64Float64RoundTiesEven, node);
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitRR(this, kArm64Float32Neg, node);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitRR(this, kArm64Float64Neg, node);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   Arm64OperandGenerator g(this);
@@ -1993,11 +1808,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2162,7 +1980,7 @@
   } else {
     DCHECK(cont->IsDeoptimize());
     selector->EmitDeoptimize(cont->Encode(opcode), g.NoOutput(), value,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   }
 }
 
@@ -2513,11 +2331,15 @@
     selector->Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
                    g.UseRegister(value), g.Label(cont->true_block()),
                    g.Label(cont->false_block()));
-  } else {
-    DCHECK(cont->IsDeoptimize());
+  } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kArm64Tst32), g.NoOutput(),
                              g.UseRegister(value), g.UseRegister(value),
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kArm64Tst32), g.NoOutput(),
+                   g.UseRegister(value), g.UseRegister(value),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -2530,14 +2352,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
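+// Traps reuse the compare-with-zero lowering; the runtime function ID is carried as an
+// extra immediate input (see the IsTrap() continuation cases above).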
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2774,21 +2611,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64ExtractLowWord32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  Arm64OperandGenerator g(this);
-  Emit(kArm64Float64ExtractHighWord32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   Arm64OperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2823,10 +2645,6 @@
        g.UseRegister(left), g.UseRegister(right));
 }
 
-void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
-  VisitRR(this, kArm64Float64SilenceNaN, node);
-}
-
 void InstructionSelector::VisitAtomicLoad(Node* node) {
   LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   Arm64OperandGenerator g(this);
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
index 1b7d116..e199a03 100644
--- a/src/compiler/ast-graph-builder.cc
+++ b/src/compiler/ast-graph-builder.cc
@@ -17,7 +17,9 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
+#include "src/feedback-vector.h"
+#include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -166,8 +168,6 @@
   void ReturnValue(Node* return_value);
   void ThrowValue(Node* exception_value);
 
-  class DeferredCommands;
-
  protected:
   enum Command { CMD_BREAK, CMD_CONTINUE, CMD_RETURN, CMD_THROW };
 
@@ -207,93 +207,6 @@
   int stack_height_;
 };
 
-// Helper class for a try-finally control scope. It can record intercepted
-// control-flow commands that cause entry into a finally-block, and re-apply
-// them after again leaving that block. Special tokens are used to identify
-// paths going through the finally-block to dispatch after leaving the block.
-class AstGraphBuilder::ControlScope::DeferredCommands : public ZoneObject {
- public:
-  explicit DeferredCommands(AstGraphBuilder* owner)
-      : owner_(owner),
-        deferred_(owner->local_zone()),
-        return_token_(nullptr),
-        throw_token_(nullptr) {}
-
-  // One recorded control-flow command.
-  struct Entry {
-    Command command;       // The command type being applied on this path.
-    Statement* statement;  // The target statement for the command or {nullptr}.
-    Node* token;           // A token identifying this particular path.
-  };
-
-  // Records a control-flow command while entering the finally-block. This also
-  // generates a new dispatch token that identifies one particular path.
-  Node* RecordCommand(Command cmd, Statement* stmt, Node* value) {
-    Node* token = nullptr;
-    switch (cmd) {
-      case CMD_BREAK:
-      case CMD_CONTINUE:
-        token = NewPathToken(dispenser_.GetBreakContinueToken());
-        break;
-      case CMD_THROW:
-        if (throw_token_) return throw_token_;
-        token = NewPathToken(TokenDispenserForFinally::kThrowToken);
-        throw_token_ = token;
-        break;
-      case CMD_RETURN:
-        if (return_token_) return return_token_;
-        token = NewPathToken(TokenDispenserForFinally::kReturnToken);
-        return_token_ = token;
-        break;
-    }
-    DCHECK_NOT_NULL(token);
-    deferred_.push_back({cmd, stmt, token});
-    return token;
-  }
-
-  // Returns the dispatch token to be used to identify the implicit fall-through
-  // path at the end of a try-block into the corresponding finally-block.
-  Node* GetFallThroughToken() { return NewPathTokenForImplicitFallThrough(); }
-
-  // Applies all recorded control-flow commands after the finally-block again.
-  // This generates a dynamic dispatch on the token from the entry point.
-  void ApplyDeferredCommands(Node* token, Node* value) {
-    SwitchBuilder dispatch(owner_, static_cast<int>(deferred_.size()));
-    dispatch.BeginSwitch();
-    for (size_t i = 0; i < deferred_.size(); ++i) {
-      Node* condition = NewPathDispatchCondition(token, deferred_[i].token);
-      dispatch.BeginLabel(static_cast<int>(i), condition);
-      dispatch.EndLabel();
-    }
-    for (size_t i = 0; i < deferred_.size(); ++i) {
-      dispatch.BeginCase(static_cast<int>(i));
-      owner_->execution_control()->PerformCommand(
-          deferred_[i].command, deferred_[i].statement, value);
-      dispatch.EndCase();
-    }
-    dispatch.EndSwitch();
-  }
-
- protected:
-  Node* NewPathToken(int token_id) {
-    return owner_->jsgraph()->Constant(token_id);
-  }
-  Node* NewPathTokenForImplicitFallThrough() {
-    return NewPathToken(TokenDispenserForFinally::kFallThroughToken);
-  }
-  Node* NewPathDispatchCondition(Node* t1, Node* t2) {
-    return owner_->NewNode(
-        owner_->javascript()->StrictEqual(CompareOperationHint::kAny), t1, t2);
-  }
-
- private:
-  TokenDispenserForFinally dispenser_;
-  AstGraphBuilder* owner_;
-  ZoneVector<Entry> deferred_;
-  Node* return_token_;
-  Node* throw_token_;
-};
-
 
 // Control scope implementation for a BreakableStatement.
 class AstGraphBuilder::ControlScopeForBreakable : public ControlScope {
@@ -356,65 +269,9 @@
 };
 
 
-// Control scope implementation for a TryCatchStatement.
-class AstGraphBuilder::ControlScopeForCatch : public ControlScope {
- public:
-  ControlScopeForCatch(AstGraphBuilder* owner, TryCatchStatement* stmt,
-                       TryCatchBuilder* control)
-      : ControlScope(owner), control_(control) {
-    builder()->try_nesting_level_++;  // Increment nesting.
-  }
-  ~ControlScopeForCatch() {
-    builder()->try_nesting_level_--;  // Decrement nesting.
-  }
-
- protected:
-  bool Execute(Command cmd, Statement* target, Node** value) override {
-    switch (cmd) {
-      case CMD_THROW:
-        control_->Throw(*value);
-        return true;
-      case CMD_BREAK:
-      case CMD_CONTINUE:
-      case CMD_RETURN:
-        break;
-    }
-    return false;
-  }
-
- private:
-  TryCatchBuilder* control_;
-};
-
-
-// Control scope implementation for a TryFinallyStatement.
-class AstGraphBuilder::ControlScopeForFinally : public ControlScope {
- public:
-  ControlScopeForFinally(AstGraphBuilder* owner, TryFinallyStatement* stmt,
-                         DeferredCommands* commands, TryFinallyBuilder* control)
-      : ControlScope(owner), commands_(commands), control_(control) {
-    builder()->try_nesting_level_++;  // Increment nesting.
-  }
-  ~ControlScopeForFinally() {
-    builder()->try_nesting_level_--;  // Decrement nesting.
-  }
-
- protected:
-  bool Execute(Command cmd, Statement* target, Node** value) override {
-    Node* token = commands_->RecordCommand(cmd, target, *value);
-    control_->LeaveTry(token, *value);
-    return true;
-  }
-
- private:
-  DeferredCommands* commands_;
-  TryFinallyBuilder* control_;
-};
-
 AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
                                  JSGraph* jsgraph, float invocation_frequency,
-                                 LoopAssignmentAnalysis* loop,
-                                 TypeHintAnalysis* type_hint_analysis)
+                                 LoopAssignmentAnalysis* loop)
     : isolate_(info->isolate()),
       local_zone_(local_zone),
       info_(info),
@@ -425,12 +282,10 @@
       globals_(0, local_zone),
       execution_control_(nullptr),
       execution_context_(nullptr),
-      try_nesting_level_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
       exit_controls_(local_zone),
       loop_assignment_analysis_(loop),
-      type_hint_analysis_(type_hint_analysis),
       state_values_cache_(jsgraph),
       liveness_analyzer_(static_cast<size_t>(info->scope()->num_stack_slots()),
                          false, local_zone),
@@ -453,7 +308,7 @@
     // calling eval, not the anonymous closure containing the eval code.
     const Operator* op =
         javascript()->LoadContext(0, Context::CLOSURE_INDEX, false);
-    return NewNode(op, current_context());
+    return NewNode(op);
   } else {
     DCHECK(closure_scope->is_function_scope());
     return GetFunctionClosure();
@@ -483,18 +338,6 @@
   return function_context_.get();
 }
 
-
-Node* AstGraphBuilder::GetNewTarget() {
-  if (!new_target_.is_set()) {
-    int params = info()->num_parameters_including_this();
-    int index = Linkage::GetJSCallNewTargetParamIndex(params);
-    const Operator* op = common()->Parameter(index, "%new.target");
-    Node* node = NewNode(op, graph()->start());
-    new_target_.set(node);
-  }
-  return new_target_.get();
-}
-
 Node* AstGraphBuilder::GetEmptyFrameState() {
   if (!empty_frame_state_.is_set()) {
     const Operator* op = common()->FrameState(
@@ -573,15 +416,10 @@
   // Build the arguments object if it is used.
   BuildArgumentsObject(scope->arguments());
 
-  // Build rest arguments array if it is used.
-  Variable* rest_parameter = scope->rest_parameter();
-  BuildRestArgumentsArray(rest_parameter);
-
-  // Build assignment to {.this_function} variable if it is used.
-  BuildThisFunctionVariable(scope->this_function_var());
-
-  // Build assignment to {new.target} variable if it is used.
-  BuildNewTargetVariable(scope->new_target_var());
+  // We don't support new.target and rest parameters here.
+  DCHECK_NULL(scope->new_target_var());
+  DCHECK_NULL(scope->rest_parameter());
+  DCHECK_NULL(scope->this_function_var());
 
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
@@ -835,7 +673,7 @@
     }
   }
   if (should_update) {
-    const Operator* op = common()->StateValues(count);
+    const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
     (*state_values) = graph()->NewNode(op, count, env_values);
   }
 }
@@ -1092,10 +930,12 @@
   switch (variable->location()) {
     case VariableLocation::UNALLOCATED: {
       DCHECK(!variable->binding_needs_init());
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
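+      // Global declarations are now emitted as quadruples: name, feedback slot,
+      // literal slot, and initial value.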
+      globals()->push_back(variable->name());
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
       globals()->push_back(isolate()->factory()->undefined_value());
+      globals()->push_back(isolate()->factory()->undefined_value());
       break;
     }
     case VariableLocation::PARAMETER:
@@ -1109,17 +949,10 @@
       if (variable->binding_needs_init()) {
         Node* value = jsgraph()->TheHoleConstant();
         const Operator* op = javascript()->StoreContext(0, variable->index());
-        NewNode(op, current_context(), value);
+        NewNode(op, value);
       }
       break;
-    case VariableLocation::LOOKUP: {
-      DCHECK(!variable->binding_needs_init());
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op = javascript()->CallRuntime(Runtime::kDeclareEvalVar);
-      Node* store = NewNode(op, name);
-      PrepareFrameState(store, decl->proxy()->id());
-      break;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1134,9 +967,16 @@
           decl->fun(), info()->script(), info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
-      FeedbackVectorSlot slot = decl->proxy()->VariableFeedbackSlot();
+      globals()->push_back(variable->name());
+      FeedbackSlot slot = decl->proxy()->VariableFeedbackSlot();
       DCHECK(!slot.IsInvalid());
       globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
+      // We need the slot where the literals array lives, too.
+      slot = decl->fun()->LiteralFeedbackSlot();
+      DCHECK(!slot.IsInvalid());
+      globals()->push_back(handle(Smi::FromInt(slot.ToInt()), isolate()));
+
       globals()->push_back(function);
       break;
     }
@@ -1151,19 +991,10 @@
       VisitForValue(decl->fun());
       Node* value = environment()->Pop();
       const Operator* op = javascript()->StoreContext(0, variable->index());
-      NewNode(op, current_context(), value);
+      NewNode(op, value);
       break;
     }
-    case VariableLocation::LOOKUP: {
-      VisitForValue(decl->fun());
-      Node* value = environment()->Pop();
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeclareEvalFunction);
-      Node* store = NewNode(op, name, value);
-      PrepareFrameState(store, decl->proxy()->id());
-      break;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -1240,14 +1071,8 @@
 
 
 void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
-  VisitForValue(stmt->expression());
-  Node* value = environment()->Pop();
-  Node* object = BuildToObject(value, stmt->ToObjectId());
-  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
-  const Operator* op = javascript()->CreateWithContext(scope_info);
-  Node* context = NewNode(op, object, GetFunctionClosureForContext());
-  PrepareFrameState(context, stmt->EntryId());
-  VisitInScope(stmt->statement(), stmt->scope(), context);
+  // Dynamic scoping is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
@@ -1277,13 +1102,7 @@
     Node* label = environment()->Pop();
     Node* tag = environment()->Top();
 
-    CompareOperationHint hint;
-    if (!type_hint_analysis_ ||
-        !type_hint_analysis_->GetCompareOperationHint(clause->CompareId(),
-                                                      &hint)) {
-      hint = CompareOperationHint::kAny;
-    }
-
+    CompareOperationHint hint = CompareOperationHint::kAny;
     const Operator* op = javascript()->StrictEqual(hint);
     Node* condition = NewNode(op, tag, label);
     compare_switch.BeginLabel(i, condition);
@@ -1354,218 +1173,32 @@
 
 
 void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  VisitForValue(stmt->subject());
-  Node* object = environment()->Pop();
-  BlockBuilder for_block(this);
-  for_block.BeginBlock();
-  // Check for null or undefined before entering loop.
-  Node* is_null_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
-              jsgraph()->NullConstant());
-  for_block.BreakWhen(is_null_cond, BranchHint::kFalse);
-  Node* is_undefined_cond =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), object,
-              jsgraph()->UndefinedConstant());
-  for_block.BreakWhen(is_undefined_cond, BranchHint::kFalse);
-  {
-    // Convert object to jsobject.
-    object = BuildToObject(object, stmt->ToObjectId());
-    environment()->Push(object);
-
-    // Prepare for-in cache.
-    Node* prepare = NewNode(javascript()->ForInPrepare(), object);
-    PrepareFrameState(prepare, stmt->PrepareId(),
-                      OutputFrameStateCombine::Push(3));
-    Node* cache_type = NewNode(common()->Projection(0), prepare);
-    Node* cache_array = NewNode(common()->Projection(1), prepare);
-    Node* cache_length = NewNode(common()->Projection(2), prepare);
-
-    // Construct the rest of the environment.
-    environment()->Push(cache_type);
-    environment()->Push(cache_array);
-    environment()->Push(cache_length);
-    environment()->Push(jsgraph()->ZeroConstant());
-
-    // Build the actual loop body.
-    LoopBuilder for_loop(this);
-    for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-    {
-      // These stack values are renamed in the case of OSR, so reload them
-      // from the environment.
-      Node* index = environment()->Peek(0);
-      Node* cache_length = environment()->Peek(1);
-      Node* cache_array = environment()->Peek(2);
-      Node* cache_type = environment()->Peek(3);
-      Node* object = environment()->Peek(4);
-
-      // Check loop termination condition (we know that the {index} is always
-      // in Smi range, so we can just set the hint on the comparison below).
-      PrepareEagerCheckpoint(stmt->EntryId());
-      Node* exit_cond =
-          NewNode(javascript()->LessThan(CompareOperationHint::kSignedSmall),
-                  index, cache_length);
-      PrepareFrameState(exit_cond, BailoutId::None());
-      for_loop.BreakUnless(exit_cond);
-
-      // Compute the next enumerated value.
-      Node* value = NewNode(javascript()->ForInNext(), object, cache_array,
-                            cache_type, index);
-      PrepareFrameState(value, stmt->FilterId(),
-                        OutputFrameStateCombine::Push());
-      IfBuilder test_value(this);
-      Node* test_value_cond =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), value,
-                  jsgraph()->UndefinedConstant());
-      test_value.If(test_value_cond, BranchHint::kFalse);
-      test_value.Then();
-      test_value.Else();
-      {
-        environment()->Push(value);
-        PrepareEagerCheckpoint(stmt->FilterId());
-        value = environment()->Pop();
-        // Bind value and do loop body.
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(stmt->EachFeedbackSlot());
-        VisitForInAssignment(stmt->each(), value, feedback,
-                             stmt->AssignmentId());
-        VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
-      }
-      test_value.End();
-      for_loop.EndBody();
-
-      // Increment counter and continue (we know that the {index} is always
-      // in Smi range, so we can just set the hint on the increment below).
-      index = environment()->Peek(0);
-      PrepareEagerCheckpoint(stmt->IncrementId());
-      index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall),
-                      index, jsgraph()->OneConstant());
-      PrepareFrameState(index, BailoutId::None());
-      environment()->Poke(0, index);
-    }
-    for_loop.EndLoop();
-    environment()->Drop(5);
-  }
-  for_block.EndBlock();
+  // Only the BytecodeGraphBuilder supports for-in.
+  return SetStackOverflow();
 }
 
 
 void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
-  LoopBuilder for_loop(this);
-  VisitForEffect(stmt->assign_iterator());
-  for_loop.BeginLoop(GetVariablesAssignedInLoop(stmt), CheckOsrEntry(stmt));
-  VisitForEffect(stmt->next_result());
-  VisitForTest(stmt->result_done());
-  Node* condition = environment()->Pop();
-  for_loop.BreakWhen(condition);
-  VisitForEffect(stmt->assign_each());
-  VisitIterationBody(stmt, &for_loop, stmt->StackCheckId());
-  for_loop.EndBody();
-  for_loop.EndLoop();
+  // Iterator looping is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  TryCatchBuilder try_control(this);
-
-  // Evaluate the try-block inside a control scope. This simulates a handler
-  // that is intercepting 'throw' control commands.
-  try_control.BeginTry();
-  {
-    ControlScopeForCatch scope(this, stmt, &try_control);
-    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-    environment()->Push(current_context());
-    Visit(stmt->try_block());
-    environment()->Pop();
-  }
-  try_control.EndTry();
-
-  // If requested, clear message object as we enter the catch block.
-  if (stmt->clear_pending_message()) {
-    Node* the_hole = jsgraph()->TheHoleConstant();
-    NewNode(javascript()->StoreMessage(), the_hole);
-  }
-
-  // Create a catch scope that binds the exception.
-  Node* exception = try_control.GetExceptionNode();
-  Handle<String> name = stmt->variable()->name();
-  Handle<ScopeInfo> scope_info = stmt->scope()->scope_info();
-  const Operator* op = javascript()->CreateCatchContext(name, scope_info);
-  Node* context = NewNode(op, exception, GetFunctionClosureForContext());
-
-  // Evaluate the catch-block.
-  VisitInScope(stmt->catch_block(), stmt->scope(), context);
-  try_control.EndCatch();
+  // Exception handling is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
-  TryFinallyBuilder try_control(this);
-
-  // We keep a record of all paths that enter the finally-block to be able to
-  // dispatch to the correct continuation point after the statements in the
-  // finally-block have been evaluated.
-  //
-  // The try-finally construct can enter the finally-block in three ways:
-  // 1. By exiting the try-block normally, falling through at the end.
-  // 2. By exiting the try-block with a function-local control flow transfer
-  //    (i.e. through break/continue/return statements).
-  // 3. By exiting the try-block with a thrown exception.
-  Node* fallthrough_result = jsgraph()->TheHoleConstant();
-  ControlScope::DeferredCommands* commands =
-      new (local_zone()) ControlScope::DeferredCommands(this);
-
-  // Evaluate the try-block inside a control scope. This simulates a handler
-  // that is intercepting all control commands.
-  try_control.BeginTry();
-  {
-    ControlScopeForFinally scope(this, stmt, commands, &try_control);
-    STATIC_ASSERT(TryBlockConstant::kElementCount == 1);
-    environment()->Push(current_context());
-    Visit(stmt->try_block());
-    environment()->Pop();
-  }
-  try_control.EndTry(commands->GetFallThroughToken(), fallthrough_result);
-
-  // The result value semantics depend on how the block was entered:
-  //  - ReturnStatement: It represents the return value being returned.
-  //  - ThrowStatement: It represents the exception being thrown.
-  //  - BreakStatement/ContinueStatement: Filled with the hole.
-  //  - Falling through into finally-block: Filled with the hole.
-  Node* result = try_control.GetResultValueNode();
-  Node* token = try_control.GetDispatchTokenNode();
-
-  // The result value, dispatch token and message is expected on the operand
-  // stack (this is in sync with FullCodeGenerator::EnterFinallyBlock).
-  Node* message = NewNode(javascript()->LoadMessage());
-  environment()->Push(token);
-  environment()->Push(result);
-  environment()->Push(message);
-
-  // Clear message object as we enter the finally block.
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  NewNode(javascript()->StoreMessage(), the_hole);
-
-  // Evaluate the finally-block.
-  Visit(stmt->finally_block());
-  try_control.EndFinally();
-
-  // The result value, dispatch token and message is restored from the operand
-  // stack (this is in sync with FullCodeGenerator::ExitFinallyBlock).
-  message = environment()->Pop();
-  result = environment()->Pop();
-  token = environment()->Pop();
-  NewNode(javascript()->StoreMessage(), message);
-
-  // Dynamic dispatch after the finally-block.
-  commands->ApplyDeferredCommands(token, result);
+  // Exception handling is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
 void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  Node* node =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
-  PrepareFrameState(node, stmt->DebugBreakId());
-  environment()->MarkAllLocalsLive();
+  // Debugger statement is supported only by going through Ignition first.
+  UNREACHABLE();
 }
 
 
@@ -1577,112 +1210,14 @@
 
   // Create node to instantiate a new closure.
   PretenureFlag pretenure = expr->pretenure() ? TENURED : NOT_TENURED;
-  const Operator* op = javascript()->CreateClosure(shared_info, pretenure);
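+  // CreateClosure now also takes the literal's feedback slot, alongside the pretenuring hint.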
+  VectorSlotPair pair = CreateVectorSlotPair(expr->LiteralFeedbackSlot());
+  const Operator* op =
+      javascript()->CreateClosure(shared_info, pair, pretenure);
   Node* value = NewNode(op);
   ast_context()->ProduceValue(expr, value);
 }
 
-
-void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
-  VisitForValueOrTheHole(expr->extends());
-  VisitForValue(expr->constructor());
-
-  // Create node to instantiate a new class.
-  Node* constructor = environment()->Pop();
-  Node* extends = environment()->Pop();
-  Node* start = jsgraph()->Constant(expr->start_position());
-  Node* end = jsgraph()->Constant(expr->end_position());
-  const Operator* opc = javascript()->CallRuntime(Runtime::kDefineClass);
-  Node* literal = NewNode(opc, extends, constructor, start, end);
-  PrepareFrameState(literal, expr->CreateLiteralId(),
-                    OutputFrameStateCombine::Push());
-  environment()->Push(literal);
-
-  // Load the "prototype" from the constructor.
-  PrepareEagerCheckpoint(expr->CreateLiteralId());
-  Handle<Name> name = isolate()->factory()->prototype_string();
-  VectorSlotPair pair = CreateVectorSlotPair(expr->PrototypeSlot());
-  Node* prototype = BuildNamedLoad(literal, name, pair);
-  PrepareFrameState(prototype, expr->PrototypeId(),
-                    OutputFrameStateCombine::Push());
-  environment()->Push(prototype);
-
-  // Create nodes to store method values into the literal.
-  for (int i = 0; i < expr->properties()->length(); i++) {
-    ClassLiteral::Property* property = expr->properties()->at(i);
-    environment()->Push(environment()->Peek(property->is_static() ? 1 : 0));
-
-    VisitForValue(property->key());
-    Node* name = BuildToName(environment()->Pop(), expr->GetIdForProperty(i));
-    environment()->Push(name);
-
-    // The static prototype property is read only. We handle the non computed
-    // property name case in the parser. Since this is the only case where we
-    // need to check for an own read only property we special case this so we do
-    // not need to do this for every property.
-    if (property->is_static() && property->is_computed_name()) {
-      Node* check = BuildThrowIfStaticPrototype(environment()->Pop(),
-                                                expr->GetIdForProperty(i));
-      environment()->Push(check);
-    }
-
-    VisitForValue(property->value());
-    Node* value = environment()->Pop();
-    Node* key = environment()->Pop();
-    Node* receiver = environment()->Pop();
-
-    BuildSetHomeObject(value, receiver, property);
-
-    switch (property->kind()) {
-      case ClassLiteral::Property::METHOD: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        Node* set_function_name =
-            jsgraph()->Constant(property->NeedsSetFunctionName());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-      case ClassLiteral::Property::GETTER: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineGetterPropertyUnchecked, 4);
-        NewNode(op, receiver, key, value, attr);
-        break;
-      }
-      case ClassLiteral::Property::SETTER: {
-        Node* attr = jsgraph()->Constant(DONT_ENUM);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineSetterPropertyUnchecked, 4);
-        NewNode(op, receiver, key, value, attr);
-        break;
-      }
-      case ClassLiteral::Property::FIELD: {
-        UNREACHABLE();
-        break;
-      }
-    }
-  }
-
-  // Set the constructor to have fast properties.
-  prototype = environment()->Pop();
-  literal = environment()->Pop();
-  const Operator* op = javascript()->CallRuntime(Runtime::kToFastProperties);
-  literal = NewNode(op, literal);
-
-  // Assign to class variable.
-  if (expr->class_variable_proxy() != nullptr) {
-    Variable* var = expr->class_variable_proxy()->var();
-    VectorSlotPair feedback = CreateVectorSlotPair(
-        expr->NeedsProxySlot() ? expr->ProxySlot()
-                               : FeedbackVectorSlot::Invalid());
-    BuildVariableAssignment(var, literal, Token::INIT, feedback,
-                            BailoutId::None());
-  }
-  ast_context()->ProduceValue(expr, literal);
-}
-
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) { UNREACHABLE(); }
 
 void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
   UNREACHABLE();
@@ -1734,7 +1269,8 @@
 
   // Create node to materialize a regular expression literal.
   const Operator* op = javascript()->CreateLiteralRegExp(
-      expr->pattern(), expr->flags(), expr->literal_index());
+      expr->pattern(), expr->flags(),
+      FeedbackVector::GetIndex(expr->literal_slot()));
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->id(), ast_context()->GetStateCombine());
   ast_context()->ProduceValue(expr, literal);
@@ -1746,8 +1282,8 @@
 
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralObject(
-      expr->constant_properties(), expr->ComputeFlags(true),
-      expr->literal_index(), expr->properties_count());
+      expr->GetOrBuildConstantProperties(isolate()), expr->ComputeFlags(true),
+      FeedbackVector::GetIndex(expr->literal_slot()), expr->properties_count());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1757,15 +1293,15 @@
   environment()->Push(literal);
 
   // Create nodes to store computed values into the literal.
-  int property_index = 0;
   AccessorTable accessor_table(local_zone());
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-    if (property->is_computed_name()) break;
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    DCHECK(!property->is_computed_name());
     if (property->IsCompileTimeValue()) continue;
 
     Literal* key = property->key()->AsLiteral();
     switch (property->kind()) {
+      case ObjectLiteral::Property::SPREAD:
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
@@ -1783,7 +1319,7 @@
             Handle<Name> name = key->AsPropertyName();
             VectorSlotPair feedback =
                 CreateVectorSlotPair(property->GetSlot(0));
-            Node* store = BuildNamedStore(literal, name, value, feedback);
+            Node* store = BuildNamedStoreOwn(literal, name, value, feedback);
             PrepareFrameState(store, key->id(),
                               OutputFrameStateCombine::Ignore());
             BuildSetHomeObject(value, literal, property, 1);
@@ -1818,21 +1354,20 @@
             javascript()->CallRuntime(Runtime::kInternalSetPrototype);
         Node* set_prototype = NewNode(op, receiver, value);
         // SetPrototype should not lazy deopt on an object literal.
-        PrepareFrameState(set_prototype,
-                          expr->GetIdForPropertySet(property_index));
+        PrepareFrameState(set_prototype, expr->GetIdForPropertySet(i));
         break;
       }
       case ObjectLiteral::Property::GETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->getter = property;
         }
         break;
       case ObjectLiteral::Property::SETTER:
         if (property->emit_store()) {
           AccessorTable::Iterator it = accessor_table.lookup(key);
-          it->second->bailout_id = expr->GetIdForPropertySet(property_index);
+          it->second->bailout_id = expr->GetIdForPropertySet(i);
           it->second->setter = property;
         }
         break;
@@ -1856,77 +1391,6 @@
     Node* call = NewNode(op, literal, name, getter, setter, attr);
     PrepareFrameState(call, it->second->bailout_id);
   }
-
-  // Object literals have two parts. The "static" part on the left contains no
-  // computed property names, and so we can compute its map ahead of time; see
-  // Runtime_CreateObjectLiteralBoilerplate. The second "dynamic" part starts
-  // with the first computed property name and continues with all properties to
-  // its right. All the code from above initializes the static component of the
-  // object literal, and arranges for the map of the result to reflect the
-  // static order in which the keys appear. For the dynamic properties, we
-  // compile them into a series of "SetOwnProperty" runtime calls. This will
-  // preserve insertion order.
-  for (; property_index < expr->properties()->length(); property_index++) {
-    ObjectLiteral::Property* property = expr->properties()->at(property_index);
-
-    if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
-      environment()->Push(environment()->Top());  // Duplicate receiver.
-      VisitForValue(property->value());
-      Node* value = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kInternalSetPrototype);
-      Node* call = NewNode(op, receiver, value);
-      PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
-      continue;
-    }
-
-    environment()->Push(environment()->Top());  // Duplicate receiver.
-    VisitForValue(property->key());
-    Node* name = BuildToName(environment()->Pop(),
-                             expr->GetIdForPropertyName(property_index));
-    environment()->Push(name);
-    VisitForValue(property->value());
-    Node* value = environment()->Pop();
-    Node* key = environment()->Pop();
-    Node* receiver = environment()->Pop();
-    BuildSetHomeObject(value, receiver, property);
-    switch (property->kind()) {
-      case ObjectLiteral::Property::CONSTANT:
-      case ObjectLiteral::Property::COMPUTED:
-      case ObjectLiteral::Property::MATERIALIZED_LITERAL: {
-        if (!property->emit_store()) continue;
-        Node* attr = jsgraph()->Constant(NONE);
-        Node* set_function_name =
-            jsgraph()->Constant(property->NeedsSetFunctionName());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kDefineDataPropertyInLiteral);
-        Node* call = NewNode(op, receiver, key, value, attr, set_function_name);
-        PrepareFrameState(call, expr->GetIdForPropertySet(property_index));
-        break;
-      }
-      case ObjectLiteral::Property::PROTOTYPE:
-        UNREACHABLE();  // Handled specially above.
-        break;
-      case ObjectLiteral::Property::GETTER: {
-        Node* attr = jsgraph()->Constant(NONE);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineGetterPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-      case ObjectLiteral::Property::SETTER: {
-        Node* attr = jsgraph()->Constant(NONE);
-        const Operator* op = javascript()->CallRuntime(
-            Runtime::kDefineSetterPropertyUnchecked, 4);
-        Node* call = NewNode(op, receiver, key, value, attr);
-        PrepareFrameState(call, BailoutId::None());
-        break;
-      }
-    }
-  }
-
   ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
@@ -1947,8 +1411,8 @@
 
   // Create node to deep-copy the literal boilerplate.
   const Operator* op = javascript()->CreateLiteralArray(
-      expr->constant_elements(), expr->ComputeFlags(true),
-      expr->literal_index(), expr->values()->length());
+      expr->GetOrBuildConstantElements(isolate()), expr->ComputeFlags(true),
+      FeedbackVector::GetIndex(expr->literal_slot()), expr->values()->length());
   Node* literal = NewNode(op, closure);
   PrepareFrameState(literal, expr->CreateLiteralId(),
                     OutputFrameStateCombine::Push());
@@ -1978,72 +1442,6 @@
   ast_context()->ProduceValue(expr, environment()->Pop());
 }
 
-void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value,
-                                           const VectorSlotPair& feedback,
-                                           BailoutId bailout_id) {
-  DCHECK(expr->IsValidReferenceExpressionOrThis());
-
-  // Left-hand side can only be a property, a global or a variable slot.
-  Property* property = expr->AsProperty();
-  LhsKind assign_type = Property::GetAssignType(property);
-
-  // Evaluate LHS expression and store the value.
-  switch (assign_type) {
-    case VARIABLE: {
-      Variable* var = expr->AsVariableProxy()->var();
-      BuildVariableAssignment(var, value, Token::ASSIGN, feedback, bailout_id);
-      break;
-    }
-    case NAMED_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj());
-      Node* object = environment()->Pop();
-      value = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedStore(object, name, value, feedback);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case KEYED_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj());
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* object = environment()->Pop();
-      value = environment()->Pop();
-      Node* store = BuildKeyedStore(object, key, value, feedback);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case NAMED_SUPER_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      environment()->Push(value);
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, bailout_id, OutputFrameStateCombine::Ignore());
-      break;
-    }
-  }
-}
-
-
 void AstGraphBuilder::VisitAssignment(Assignment* expr) {
   DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
 
@@ -2071,13 +1469,8 @@
       VisitForValue(property->key());
       break;
     case NAMED_SUPER_PROPERTY:
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      break;
     case KEYED_SUPER_PROPERTY:
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
+      UNREACHABLE();
       break;
   }
 
@@ -2115,28 +1508,10 @@
                           OutputFrameStateCombine::Push());
         break;
       }
-      case NAMED_SUPER_PROPERTY: {
-        Node* home_object = environment()->Top();
-        Node* receiver = environment()->Peek(1);
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        VectorSlotPair pair =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-        PrepareFrameState(old_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
+      case NAMED_SUPER_PROPERTY:
+      case KEYED_SUPER_PROPERTY:
+        UNREACHABLE();
         break;
-      }
-      case KEYED_SUPER_PROPERTY: {
-        Node* key = environment()->Top();
-        Node* home_object = environment()->Peek(1);
-        Node* receiver = environment()->Peek(2);
-        VectorSlotPair pair =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-        PrepareFrameState(old_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        break;
-      }
     }
     environment()->Push(old_value);
     VisitForValue(expr->value());
@@ -2181,22 +1556,10 @@
                         OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, expr->id(), ast_context()->GetStateCombine());
-      break;
-    }
   }
 
   ast_context()->ProduceValue(expr, value);
@@ -2205,8 +1568,7 @@
 
 void AstGraphBuilder::VisitYield(Yield* expr) {
   // Generator functions are supported only by going through Ignition first.
-  SetStackOverflow();
-  ast_context()->ProduceValue(expr, jsgraph()->UndefinedConstant());
+  UNREACHABLE();
 }
 
 
@@ -2243,27 +1605,10 @@
       PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = expr->key()->AsLiteral()->AsPropertyName();
-      value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(expr->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(expr->key());
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      PrepareFrameState(value, expr->LoadId(), OutputFrameStateCombine::Push());
-      break;
-    }
   }
   ast_context()->ProduceValue(expr, value);
 }
@@ -2272,140 +1617,70 @@
 void AstGraphBuilder::VisitCall(Call* expr) {
   Expression* callee = expr->expression();
   Call::CallType call_type = expr->GetCallType();
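+  // Possibly-eval calls are supported only by going through Ignition first.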
+  CHECK(!expr->is_possibly_eval());
 
   // Prepare the callee and the receiver to the function call. This depends on
   // the semantics of the underlying call type.
   ConvertReceiverMode receiver_hint = ConvertReceiverMode::kAny;
   Node* receiver_value = nullptr;
   Node* callee_value = nullptr;
-  if (expr->is_possibly_eval()) {
-    if (callee->AsVariableProxy()->var()->IsLookupSlot()) {
-      Variable* variable = callee->AsVariableProxy()->var();
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
-      Node* pair = NewNode(op, name);
-      callee_value = NewNode(common()->Projection(0), pair);
-      receiver_value = NewNode(common()->Projection(1), pair);
-      PrepareFrameState(pair, expr->LookupId(),
-                        OutputFrameStateCombine::Push(2));
-    } else {
+  switch (call_type) {
+    case Call::GLOBAL_CALL: {
+      VariableProxy* proxy = callee->AsVariableProxy();
+      VectorSlotPair pair = CreateVectorSlotPair(proxy->VariableFeedbackSlot());
+      PrepareEagerCheckpoint(BeforeId(proxy));
+      callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
+                                       pair, OutputFrameStateCombine::Push());
+      receiver_hint = ConvertReceiverMode::kNullOrUndefined;
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+    }
+    case Call::NAMED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VectorSlotPair feedback =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      VisitForValue(property->obj());
+      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
+      Node* object = environment()->Top();
+      callee_value = BuildNamedLoad(object, name, feedback);
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
+      // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+      // not to be null or undefined at this point.
+      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+      receiver_value = environment()->Pop();
+      break;
+    }
+    case Call::KEYED_PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VectorSlotPair feedback =
+          CreateVectorSlotPair(property->PropertyFeedbackSlot());
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Pop();
+      Node* object = environment()->Top();
+      callee_value = BuildKeyedLoad(object, key, feedback);
+      PrepareFrameState(callee_value, property->LoadId(),
+                        OutputFrameStateCombine::Push());
+      // Note that a property call requires the receiver to be wrapped into
+      // an object for sloppy callees. However, the receiver is guaranteed
+      // not to be null or undefined at this point.
+      receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
+      receiver_value = environment()->Pop();
+      break;
+    }
+    case Call::OTHER_CALL:
       VisitForValue(callee);
       callee_value = environment()->Pop();
       receiver_hint = ConvertReceiverMode::kNullOrUndefined;
       receiver_value = jsgraph()->UndefinedConstant();
-    }
-  } else {
-    switch (call_type) {
-      case Call::GLOBAL_CALL: {
-        VariableProxy* proxy = callee->AsVariableProxy();
-        VectorSlotPair pair =
-            CreateVectorSlotPair(proxy->VariableFeedbackSlot());
-        PrepareEagerCheckpoint(BeforeId(proxy));
-        callee_value = BuildVariableLoad(proxy->var(), expr->expression()->id(),
-                                         pair, OutputFrameStateCombine::Push());
-        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-        receiver_value = jsgraph()->UndefinedConstant();
-        break;
-      }
-      case Call::WITH_CALL: {
-        Variable* variable = callee->AsVariableProxy()->var();
-        Node* name = jsgraph()->Constant(variable->name());
-        const Operator* op =
-            javascript()->CallRuntime(Runtime::kLoadLookupSlotForCall);
-        Node* pair = NewNode(op, name);
-        callee_value = NewNode(common()->Projection(0), pair);
-        receiver_value = NewNode(common()->Projection(1), pair);
-        PrepareFrameState(pair, expr->LookupId(),
-                          OutputFrameStateCombine::Push(2));
-        break;
-      }
-      case Call::NAMED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        VisitForValue(property->obj());
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        Node* object = environment()->Top();
-        callee_value = BuildNamedLoad(object, name, feedback);
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. However the receiver is guaranteed
-        // not to be null or undefined at this point.
-        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-        receiver_value = environment()->Pop();
-        break;
-      }
-      case Call::KEYED_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        VectorSlotPair feedback =
-            CreateVectorSlotPair(property->PropertyFeedbackSlot());
-        VisitForValue(property->obj());
-        VisitForValue(property->key());
-        Node* key = environment()->Pop();
-        Node* object = environment()->Top();
-        callee_value = BuildKeyedLoad(object, key, feedback);
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. However the receiver is guaranteed
-        // not to be null or undefined at this point.
-        receiver_hint = ConvertReceiverMode::kNotNullOrUndefined;
-        receiver_value = environment()->Pop();
-        break;
-      }
-      case Call::NAMED_SUPER_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        SuperPropertyReference* super_ref =
-            property->obj()->AsSuperPropertyReference();
-        VisitForValue(super_ref->home_object());
-        VisitForValue(super_ref->this_var());
-        Node* home = environment()->Peek(1);
-        Node* object = environment()->Top();
-        Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-        callee_value =
-            BuildNamedSuperLoad(object, home, name, VectorSlotPair());
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. Since the receiver is not the target of
-        // the load, it could very well be null or undefined at this point.
-        receiver_value = environment()->Pop();
-        environment()->Drop(1);
-        break;
-      }
-      case Call::KEYED_SUPER_PROPERTY_CALL: {
-        Property* property = callee->AsProperty();
-        SuperPropertyReference* super_ref =
-            property->obj()->AsSuperPropertyReference();
-        VisitForValue(super_ref->home_object());
-        VisitForValue(super_ref->this_var());
-        environment()->Push(environment()->Top());    // Duplicate this_var.
-        environment()->Push(environment()->Peek(2));  // Duplicate home_obj.
-        VisitForValue(property->key());
-        Node* key = environment()->Pop();
-        Node* home = environment()->Pop();
-        Node* object = environment()->Pop();
-        callee_value = BuildKeyedSuperLoad(object, home, key, VectorSlotPair());
-        PrepareFrameState(callee_value, property->LoadId(),
-                          OutputFrameStateCombine::Push());
-        // Note that a property call requires the receiver to be wrapped into
-        // an object for sloppy callees. Since the receiver is not the target of
-        // the load, it could very well be null or undefined at this point.
-        receiver_value = environment()->Pop();
-        environment()->Drop(1);
-        break;
-      }
-      case Call::SUPER_CALL:
-        return VisitCallSuper(expr);
-      case Call::OTHER_CALL:
-        VisitForValue(callee);
-        callee_value = environment()->Pop();
-        receiver_hint = ConvertReceiverMode::kNullOrUndefined;
-        receiver_value = jsgraph()->UndefinedConstant();
-        break;
-    }
+      break;
+    case Call::NAMED_SUPER_PROPERTY_CALL:
+    case Call::KEYED_SUPER_PROPERTY_CALL:
+    case Call::SUPER_CALL:
+    case Call::WITH_CALL:
+      UNREACHABLE();
   }
 
   // The callee and the receiver both have to be pushed onto the operand stack
@@ -2417,41 +1692,13 @@
   ZoneList<Expression*>* args = expr->arguments();
   VisitForValues(args);
 
-  // Resolve callee for a potential direct eval call. This block will mutate the
-  // callee value pushed onto the environment.
-  if (expr->is_possibly_eval() && args->length() > 0) {
-    int arg_count = args->length();
-
-    // Extract callee and source string from the environment.
-    Node* callee = environment()->Peek(arg_count + 1);
-    Node* source = environment()->Peek(arg_count - 1);
-
-    // Create node to ask for help resolving potential eval call. This will
-    // provide a fully resolved callee to patch into the environment.
-    Node* function = GetFunctionClosure();
-    Node* language = jsgraph()->Constant(language_mode());
-    Node* eval_scope_position =
-        jsgraph()->Constant(current_scope()->start_position());
-    Node* eval_position = jsgraph()->Constant(expr->position());
-    const Operator* op =
-        javascript()->CallRuntime(Runtime::kResolvePossiblyDirectEval);
-    Node* new_callee = NewNode(op, callee, source, function, language,
-                               eval_scope_position, eval_position);
-    PrepareFrameState(new_callee, expr->EvalId(),
-                      OutputFrameStateCombine::PokeAt(arg_count + 1));
-
-    // Patch callee on the environment.
-    environment()->Poke(arg_count + 1, new_callee);
-  }
-
   // Create node to perform the function call.
   float const frequency = ComputeCallFrequency(expr->CallFeedbackICSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallFeedbackICSlot());
   const Operator* call =
-      javascript()->CallFunction(args->length() + 2, frequency, feedback,
-                                 receiver_hint, expr->tail_call_mode());
-  PrepareEagerCheckpoint(expr->is_possibly_eval() ? expr->EvalId()
-                                                  : expr->CallId());
+      javascript()->Call(args->length() + 2, frequency, feedback, receiver_hint,
+                         expr->tail_call_mode());
+  PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   // The callee passed to the call, we just need to push something here to
   // satisfy the bailout location contract. The fullcodegen code will not
@@ -2463,34 +1710,6 @@
 }
 
 
-void AstGraphBuilder::VisitCallSuper(Call* expr) {
-  SuperCallReference* super = expr->expression()->AsSuperCallReference();
-  DCHECK_NOT_NULL(super);
-
-  // Prepare the callee to the super call.
-  VisitForValue(super->this_function_var());
-  Node* this_function = environment()->Pop();
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kInlineGetSuperConstructor, 1);
-  Node* super_function = NewNode(op, this_function);
-  environment()->Push(super_function);
-
-  // Evaluate all arguments to the super call.
-  ZoneList<Expression*>* args = expr->arguments();
-  VisitForValues(args);
-
-  // The new target is loaded from the {new.target} variable.
-  VisitForValue(super->new_target_var());
-
-  // Create node to perform the super call.
-  const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, 0.0f, VectorSlotPair());
-  Node* value = ProcessArguments(call, args->length() + 2);
-  PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
-  ast_context()->ProduceValue(expr, value);
-}
-
-
 void AstGraphBuilder::VisitCallNew(CallNew* expr) {
   VisitForValue(expr->expression());
 
@@ -2505,7 +1724,7 @@
   float const frequency = ComputeCallFrequency(expr->CallNewFeedbackSlot());
   VectorSlotPair feedback = CreateVectorSlotPair(expr->CallNewFeedbackSlot());
   const Operator* call =
-      javascript()->CallConstruct(args->length() + 2, frequency, feedback);
+      javascript()->Construct(args->length() + 2, frequency, feedback);
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->ReturnId(), OutputFrameStateCombine::Push());
   ast_context()->ProduceValue(expr, value);
@@ -2526,7 +1745,7 @@
   VisitForValues(args);
 
   // Create node to perform the JS runtime call.
-  const Operator* call = javascript()->CallFunction(args->length() + 2);
+  const Operator* call = javascript()->Call(args->length() + 2);
   PrepareEagerCheckpoint(expr->CallId());
   Node* value = ProcessArguments(call, args->length() + 2);
   PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
@@ -2625,35 +1844,10 @@
       stack_depth = 2;
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      Node* home_object = environment()->Top();
-      Node* receiver = environment()->Peek(1);
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      VectorSlotPair pair =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      old_value = BuildNamedSuperLoad(receiver, home_object, name, pair);
-      PrepareFrameState(old_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      stack_depth = 2;
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      VisitForValue(property->obj()->AsSuperPropertyReference()->this_var());
-      VisitForValue(property->obj()->AsSuperPropertyReference()->home_object());
-      VisitForValue(property->key());
-      Node* key = environment()->Top();
-      Node* home_object = environment()->Peek(1);
-      Node* receiver = environment()->Peek(2);
-      VectorSlotPair pair =
-          CreateVectorSlotPair(property->PropertyFeedbackSlot());
-      old_value = BuildKeyedSuperLoad(receiver, home_object, key, pair);
-      PrepareFrameState(old_value, property->LoadId(),
-                        OutputFrameStateCombine::Push());
-      stack_depth = 3;
-      break;
-    }
   }
 
   // Convert old value into a number.
@@ -2708,24 +1902,10 @@
                         OutputFrameStateCombine::Push());
       break;
     }
-    case NAMED_SUPER_PROPERTY: {
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Handle<Name> name = property->key()->AsLiteral()->AsPropertyName();
-      Node* store = BuildNamedSuperStore(receiver, home_object, name, value);
-      PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Push());
+    case NAMED_SUPER_PROPERTY:
+    case KEYED_SUPER_PROPERTY:
+      UNREACHABLE();
       break;
-    }
-    case KEYED_SUPER_PROPERTY: {
-      Node* key = environment()->Pop();
-      Node* home_object = environment()->Pop();
-      Node* receiver = environment()->Pop();
-      Node* store = BuildKeyedSuperStore(receiver, home_object, key, value);
-      PrepareFrameState(store, expr->AssignmentId(),
-                        OutputFrameStateCombine::Push());
-      break;
-    }
   }
 
   // Restore old value for postfix expressions.
@@ -2804,13 +1984,7 @@
     return VisitLiteralCompareNil(expr, sub_expr, jsgraph()->NullConstant());
   }
 
-  CompareOperationHint hint;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetCompareOperationHint(
-          expr->CompareOperationFeedbackId(), &hint)) {
-    hint = CompareOperationHint::kAny;
-  }
-
+  CompareOperationHint hint = CompareOperationHint::kAny;
   const Operator* op;
   switch (expr->op()) {
     case Token::EQ:
@@ -2868,6 +2042,10 @@
   UNREACHABLE();
 }
 
+void AstGraphBuilder::VisitGetIterator(GetIterator* expr) {
+  // GetIterator is supported only by going through Ignition first.
+  UNREACHABLE();
+}
 
 void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
   Node* value = GetFunctionClosure();
@@ -2877,8 +2055,7 @@
 
 void AstGraphBuilder::VisitSuperPropertyReference(
     SuperPropertyReference* expr) {
-  Node* value = BuildThrowUnsupportedSuperError(expr->id());
-  ast_context()->ProduceValue(expr, value);
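+  // Super property references are supported only by going through Ignition
+  // first.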
+  UNREACHABLE();
 }
 
 
@@ -2898,17 +2075,16 @@
   AstVisitor<AstGraphBuilder>::VisitDeclarations(declarations);
   if (globals()->empty()) return;
   int array_index = 0;
-  Handle<TypeFeedbackVector> feedback_vector(
-      info()->closure()->feedback_vector());
+  Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector());
   Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
       static_cast<int>(globals()->size()), TENURED);
   for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
   int encoded_flags = info()->GetDeclareGlobalsFlags();
   Node* flags = jsgraph()->Constant(encoded_flags);
-  Node* pairs = jsgraph()->Constant(data);
+  Node* decls = jsgraph()->Constant(data);
   Node* vector = jsgraph()->Constant(feedback_vector);
   const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals);
-  Node* call = NewNode(op, pairs, flags, vector);
+  Node* call = NewNode(op, decls, flags, vector);
   PrepareFrameState(call, BailoutId::Declarations());
   globals()->clear();
 }
@@ -2920,20 +2096,12 @@
 }
 
 
-void AstGraphBuilder::VisitInScope(Statement* stmt, Scope* s, Node* context) {
-  ContextScope scope(this, s, context);
-  DCHECK(s->declarations()->is_empty());
-  Visit(stmt);
-}
-
 void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
                                          LoopBuilder* loop,
                                          BailoutId stack_check_id) {
   ControlScopeForIteration scope(this, stmt, loop);
-  if (FLAG_turbo_loop_stackcheck || !info()->shared_info()->asm_function()) {
-    Node* node = NewNode(javascript()->StackCheck());
-    PrepareFrameState(node, stack_check_id);
-  }
+  Node* node = NewNode(javascript()->StackCheck());
+  PrepareFrameState(node, stack_check_id);
   Visit(stmt->body());
 }
 
@@ -3063,9 +2231,7 @@
   return current_scope()->language_mode();
 }
 
-
-VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(
-    FeedbackVectorSlot slot) const {
+VectorSlotPair AstGraphBuilder::CreateVectorSlotPair(FeedbackSlot slot) const {
   return VectorSlotPair(handle(info()->closure()->feedback_vector()), slot);
 }
 
@@ -3074,50 +2240,10 @@
   Visit(node->expression());
 }
 
-
-namespace {
-
-// Limit of context chain length to which inline check is possible.
-const int kMaxCheckDepth = 30;
-
-// Sentinel for {TryLoadDynamicVariable} disabling inline checks.
-const uint32_t kFullCheckRequired = -1;
-
-}  // namespace
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicGlobal(Variable* variable) {
-  DCHECK_EQ(DYNAMIC_GLOBAL, variable->mode());
-  uint32_t check_depths = 0;
-  for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (!s->calls_sloppy_eval()) continue;
-    int depth = current_scope()->ContextChainLength(s);
-    if (depth > kMaxCheckDepth) return kFullCheckRequired;
-    check_depths |= 1 << depth;
-  }
-  return check_depths;
-}
-
-
-uint32_t AstGraphBuilder::ComputeBitsetForDynamicContext(Variable* variable) {
-  DCHECK_EQ(DYNAMIC_LOCAL, variable->mode());
-  uint32_t check_depths = 0;
-  for (Scope* s = current_scope(); s != nullptr; s = s->outer_scope()) {
-    if (!s->NeedsContext()) continue;
-    if (!s->calls_sloppy_eval() && s != variable->scope()) continue;
-    int depth = current_scope()->ContextChainLength(s);
-    if (depth > kMaxCheckDepth) return kFullCheckRequired;
-    check_depths |= 1 << depth;
-    if (s == variable->scope()) break;
-  }
-  return check_depths;
-}
-
-float AstGraphBuilder::ComputeCallFrequency(FeedbackVectorSlot slot) const {
+float AstGraphBuilder::ComputeCallFrequency(FeedbackSlot slot) const {
   if (slot.IsInvalid()) return 0.0f;
-  Handle<TypeFeedbackVector> feedback_vector(
-      info()->closure()->feedback_vector(), isolate());
+  Handle<FeedbackVector> feedback_vector(info()->closure()->feedback_vector(),
+                                         isolate());
   CallICNexus nexus(feedback_vector, slot);
   return nexus.ComputeCallFrequency() * invocation_frequency_;
 }
@@ -3147,7 +2273,8 @@
     Variable* variable = scope->receiver();
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
     const Operator* op = javascript()->StoreContext(0, variable->index());
-    NewNode(op, local_context, receiver);
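+    // The node takes its context input implicitly from the environment, so
+    // patch in the newly allocated local context explicitly.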
+    Node* node = NewNode(op, receiver);
+    NodeProperties::ReplaceContextInput(node, local_context);
   }
 
   // Copy parameters into context if necessary.
@@ -3159,7 +2286,8 @@
     // Context variable (at bottom of the context chain).
     DCHECK_EQ(0, scope->ContextChainLength(variable->scope()));
     const Operator* op = javascript()->StoreContext(0, variable->index());
-    NewNode(op, local_context, parameter);
+    Node* node = NewNode(op, parameter);
+    NodeProperties::ReplaceContextInput(node, local_context);
   }
 
   return local_context;
@@ -3171,7 +2299,8 @@
 
   // Allocate a new local context.
   int slot_count = scope->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
-  const Operator* op = javascript()->CreateFunctionContext(slot_count);
+  const Operator* op =
+      javascript()->CreateFunctionContext(slot_count, scope->scope_type());
   Node* local_context = NewNode(op, GetFunctionClosure());
 
   return local_context;
@@ -3224,52 +2353,6 @@
   return object;
 }
 
-Node* AstGraphBuilder::BuildRestArgumentsArray(Variable* rest) {
-  if (rest == nullptr) return nullptr;
-
-  // Allocate and initialize a new arguments object.
-  CreateArgumentsType type = CreateArgumentsType::kRestParameter;
-  const Operator* op = javascript()->CreateArguments(type);
-  Node* object = NewNode(op, GetFunctionClosure());
-  PrepareFrameState(object, BailoutId::None());
-
-  // Assign the object to the {rest} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  DCHECK(rest->IsContextSlot() || rest->IsStackAllocated());
-  BuildVariableAssignment(rest, object, Token::ASSIGN, VectorSlotPair(),
-                          BailoutId::None());
-  return object;
-}
-
-
-Node* AstGraphBuilder::BuildThisFunctionVariable(Variable* this_function_var) {
-  if (this_function_var == nullptr) return nullptr;
-
-  // Retrieve the closure we were called with.
-  Node* this_function = GetFunctionClosure();
-
-  // Assign the object to the {.this_function} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  BuildVariableAssignment(this_function_var, this_function, Token::INIT,
-                          VectorSlotPair(), BailoutId::None());
-  return this_function;
-}
-
-
-Node* AstGraphBuilder::BuildNewTargetVariable(Variable* new_target_var) {
-  if (new_target_var == nullptr) return nullptr;
-
-  // Retrieve the new target we were called with.
-  Node* object = GetNewTarget();
-
-  // Assign the object to the {new.target} variable. This should never lazy
-  // deopt, so it is fine to send invalid bailout id.
-  BuildVariableAssignment(new_target_var, object, Token::INIT, VectorSlotPair(),
-                          BailoutId::None());
-  return object;
-}
-
-
 Node* AstGraphBuilder::BuildHoleCheckThenThrow(Node* value, Variable* variable,
                                                Node* not_hole,
                                                BailoutId bailout_id) {
@@ -3305,25 +2388,6 @@
   return environment()->Pop();
 }
 
-
-Node* AstGraphBuilder::BuildThrowIfStaticPrototype(Node* name,
-                                                   BailoutId bailout_id) {
-  IfBuilder prototype_check(this);
-  Node* prototype_string =
-      jsgraph()->Constant(isolate()->factory()->prototype_string());
-  Node* check = NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-                        name, prototype_string);
-  prototype_check.If(check);
-  prototype_check.Then();
-  Node* error = BuildThrowStaticPrototypeError(bailout_id);
-  environment()->Push(error);
-  prototype_check.Else();
-  environment()->Push(name);
-  prototype_check.End();
-  return environment()->Pop();
-}
-
-
 Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
                                          BailoutId bailout_id,
                                          const VectorSlotPair& feedback,
@@ -3363,7 +2427,7 @@
                        info()->is_function_context_specializing();
       const Operator* op =
           javascript()->LoadContext(depth, variable->index(), immutable);
-      Node* value = NewNode(op, current_context());
+      Node* value = NewNode(op);
       // TODO(titzer): initialization checks are redundant for already
       // initialized immutable context loads, but only specialization knows.
       // Maybe specializer should be a parameter to the graph builder?
@@ -3373,17 +2437,7 @@
       }
       return value;
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Handle<String> name = variable->name();
-      if (Node* node = TryLoadDynamicVariable(variable, name, bailout_id,
-                                              feedback, combine, typeof_mode)) {
-        return node;
-      }
-      Node* value = BuildDynamicLoad(name, typeof_mode);
-      PrepareFrameState(value, bailout_id, combine);
-      return value;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3411,15 +2465,7 @@
       // Local var, const, or let variable or context variable.
       return jsgraph()->BooleanConstant(variable->is_this());
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Node* name = jsgraph()->Constant(variable->name());
-      const Operator* op =
-          javascript()->CallRuntime(Runtime::kDeleteLookupSlot);
-      Node* result = NewNode(op, name);
-      PrepareFrameState(result, bailout_id, combine);
-      return result;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3498,7 +2544,7 @@
         // Perform an initialization check for let declared variables.
         const Operator* op =
             javascript()->LoadContext(depth, variable->index(), false);
-        Node* current = NewNode(op, current_context());
+        Node* current = NewNode(op);
         value = BuildHoleCheckThenThrow(current, variable, value, bailout_id);
       } else if (mode == CONST && op == Token::INIT) {
         // Perform an initialization check for const {this} variables.
@@ -3507,7 +2553,7 @@
         if (variable->is_this()) {
           const Operator* op =
               javascript()->LoadContext(depth, variable->index(), false);
-          Node* current = NewNode(op, current_context());
+          Node* current = NewNode(op);
           value = BuildHoleCheckElseThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT &&
@@ -3524,22 +2570,16 @@
         if (variable->binding_needs_init()) {
           const Operator* op =
               javascript()->LoadContext(depth, variable->index(), false);
-          Node* current = NewNode(op, current_context());
+          Node* current = NewNode(op);
           BuildHoleCheckThenThrow(current, variable, value, bailout_id);
         }
         // Assignment to const is exception in all modes.
         return BuildThrowConstAssignError(bailout_id);
       }
       const Operator* op = javascript()->StoreContext(depth, variable->index());
-      return NewNode(op, current_context(), value);
+      return NewNode(op, value);
     }
-    case VariableLocation::LOOKUP: {
-      // Dynamic lookup of context variable (anywhere in the chain).
-      Handle<Name> name = variable->name();
-      Node* store = BuildDynamicStore(name, value);
-      PrepareFrameState(store, bailout_id, combine);
-      return store;
-    }
+    case VariableLocation::LOOKUP:
     case VariableLocation::MODULE:
       UNREACHABLE();
   }
@@ -3551,7 +2591,7 @@
 Node* AstGraphBuilder::BuildKeyedLoad(Node* object, Node* key,
                                       const VectorSlotPair& feedback) {
   const Operator* op = javascript()->LoadProperty(feedback);
-  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  Node* node = NewNode(op, object, key);
   return node;
 }
 
@@ -3559,15 +2599,17 @@
 Node* AstGraphBuilder::BuildNamedLoad(Node* object, Handle<Name> name,
                                       const VectorSlotPair& feedback) {
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  Node* node = NewNode(op, object, GetFunctionClosure());
+  Node* node = NewNode(op, object);
   return node;
 }
 
 
 Node* AstGraphBuilder::BuildKeyedStore(Node* object, Node* key, Node* value,
                                        const VectorSlotPair& feedback) {
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+            language_mode());
   const Operator* op = javascript()->StoreProperty(language_mode(), feedback);
-  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, key, value);
   return node;
 }
 
@@ -3575,60 +2617,30 @@
 Node* AstGraphBuilder::BuildNamedStore(Node* object, Handle<Name> name,
                                        Node* value,
                                        const VectorSlotPair& feedback) {
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+            language_mode());
   const Operator* op =
       javascript()->StoreNamed(language_mode(), name, feedback);
-  Node* node = NewNode(op, object, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildNamedSuperLoad(Node* receiver, Node* home_object,
-                                           Handle<Name> name,
-                                           const VectorSlotPair& feedback) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadFromSuper);
-  Node* node = NewNode(op, receiver, home_object, name_node);
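+// Builds a store that defines an own data property on {object}; used for
+// named (non-computed) properties in object literals.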
+Node* AstGraphBuilder::BuildNamedStoreOwn(Node* object, Handle<Name> name,
+                                          Node* value,
+                                          const VectorSlotPair& feedback) {
+  DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+            feedback.vector()->GetKind(feedback.slot()));
+  const Operator* op = javascript()->StoreNamedOwn(name, feedback);
+  Node* node = NewNode(op, object, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildKeyedSuperLoad(Node* receiver, Node* home_object,
-                                           Node* key,
-                                           const VectorSlotPair& feedback) {
-  const Operator* op = javascript()->CallRuntime(Runtime::kLoadKeyedFromSuper);
-  Node* node = NewNode(op, receiver, home_object, key);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildKeyedSuperStore(Node* receiver, Node* home_object,
-                                            Node* key, Node* value) {
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreKeyedToSuper_Strict
-                                        : Runtime::kStoreKeyedToSuper_Sloppy;
-  const Operator* op = javascript()->CallRuntime(function_id, 4);
-  Node* node = NewNode(op, receiver, home_object, key, value);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildNamedSuperStore(Node* receiver, Node* home_object,
-                                            Handle<Name> name, Node* value) {
-  Node* name_node = jsgraph()->Constant(name);
-  Runtime::FunctionId function_id = is_strict(language_mode())
-                                        ? Runtime::kStoreToSuper_Strict
-                                        : Runtime::kStoreToSuper_Sloppy;
-  const Operator* op = javascript()->CallRuntime(function_id, 4);
-  Node* node = NewNode(op, receiver, home_object, name_node, value);
-  return node;
-}
-
-
 Node* AstGraphBuilder::BuildGlobalLoad(Handle<Name> name,
                                        const VectorSlotPair& feedback,
                                        TypeofMode typeof_mode) {
+  DCHECK_EQ(feedback.vector()->GetTypeofMode(feedback.slot()), typeof_mode);
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  Node* node = NewNode(op, GetFunctionClosure());
+  Node* node = NewNode(op);
   return node;
 }
 
@@ -3637,33 +2649,10 @@
                                         const VectorSlotPair& feedback) {
   const Operator* op =
       javascript()->StoreGlobal(language_mode(), name, feedback);
-  Node* node = NewNode(op, value, GetFunctionClosure());
+  Node* node = NewNode(op, value);
   return node;
 }
 
-
-Node* AstGraphBuilder::BuildDynamicLoad(Handle<Name> name,
-                                        TypeofMode typeof_mode) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op =
-      javascript()->CallRuntime(typeof_mode == TypeofMode::NOT_INSIDE_TYPEOF
-                                    ? Runtime::kLoadLookupSlot
-                                    : Runtime::kLoadLookupSlotInsideTypeof);
-  Node* node = NewNode(op, name_node);
-  return node;
-}
-
-
-Node* AstGraphBuilder::BuildDynamicStore(Handle<Name> name, Node* value) {
-  Node* name_node = jsgraph()->Constant(name);
-  const Operator* op = javascript()->CallRuntime(
-      is_strict(language_mode()) ? Runtime::kStoreLookupSlot_Strict
-                                 : Runtime::kStoreLookupSlot_Sloppy);
-  Node* node = NewNode(op, name_node, value);
-  return node;
-}
-
-
 Node* AstGraphBuilder::BuildLoadGlobalObject() {
   return BuildLoadNativeContextField(Context::EXTENSION_INDEX);
 }
@@ -3672,30 +2661,20 @@
 Node* AstGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
-  Node* native_context = NewNode(op, current_context());
-  return NewNode(javascript()->LoadContext(0, index, true), native_context);
+  Node* native_context = NewNode(op);
+  Node* result = NewNode(javascript()->LoadContext(0, index, true));
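+  // The field must be loaded from the native context rather than the current
+  // one, so replace the implicit context input.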
+  NodeProperties::ReplaceContextInput(result, native_context);
+  return result;
 }
 
 
 Node* AstGraphBuilder::BuildToBoolean(Node* input, TypeFeedbackId feedback_id) {
   if (Node* node = TryFastToBoolean(input)) return node;
-  ToBooleanHints hints;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetToBooleanHints(feedback_id, &hints)) {
-    hints = ToBooleanHint::kAny;
-  }
+  ToBooleanHints hints = ToBooleanHint::kAny;
   return NewNode(javascript()->ToBoolean(hints), input);
 }
 
 
-Node* AstGraphBuilder::BuildToName(Node* input, BailoutId bailout_id) {
-  if (Node* node = TryFastToName(input)) return node;
-  Node* name = NewNode(javascript()->ToName(), input);
-  PrepareFrameState(name, bailout_id, OutputFrameStateCombine::Push());
-  return name;
-}
-
-
 Node* AstGraphBuilder::BuildToObject(Node* input, BailoutId bailout_id) {
   Node* object = NewNode(javascript()->ToObject(), input);
   PrepareFrameState(object, bailout_id, OutputFrameStateCombine::Push());
@@ -3750,28 +2729,6 @@
 }
 
 
-Node* AstGraphBuilder::BuildThrowStaticPrototypeError(BailoutId bailout_id) {
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowStaticPrototypeError);
-  Node* call = NewNode(op);
-  PrepareFrameState(call, bailout_id);
-  Node* control = NewNode(common()->Throw(), call);
-  UpdateControlDependencyToLeaveFunction(control);
-  return call;
-}
-
-
-Node* AstGraphBuilder::BuildThrowUnsupportedSuperError(BailoutId bailout_id) {
-  const Operator* op =
-      javascript()->CallRuntime(Runtime::kThrowUnsupportedSuperError);
-  Node* call = NewNode(op);
-  PrepareFrameState(call, bailout_id);
-  Node* control = NewNode(common()->Throw(), call);
-  UpdateControlDependencyToLeaveFunction(control);
-  return call;
-}
-
-
 Node* AstGraphBuilder::BuildReturn(Node* return_value) {
   // Emit tracing call if requested to do so.
   if (FLAG_trace) {
@@ -3796,44 +2753,40 @@
 Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op,
                                      TypeFeedbackId feedback_id) {
   const Operator* js_op;
-  BinaryOperationHint hint;
-  if (!type_hint_analysis_ ||
-      !type_hint_analysis_->GetBinaryOperationHint(feedback_id, &hint)) {
-    hint = BinaryOperationHint::kAny;
-  }
+  BinaryOperationHint hint = BinaryOperationHint::kAny;
   switch (op) {
     case Token::BIT_OR:
-      js_op = javascript()->BitwiseOr(hint);
+      js_op = javascript()->BitwiseOr();
       break;
     case Token::BIT_AND:
-      js_op = javascript()->BitwiseAnd(hint);
+      js_op = javascript()->BitwiseAnd();
       break;
     case Token::BIT_XOR:
-      js_op = javascript()->BitwiseXor(hint);
+      js_op = javascript()->BitwiseXor();
       break;
     case Token::SHL:
-      js_op = javascript()->ShiftLeft(hint);
+      js_op = javascript()->ShiftLeft();
       break;
     case Token::SAR:
-      js_op = javascript()->ShiftRight(hint);
+      js_op = javascript()->ShiftRight();
       break;
     case Token::SHR:
-      js_op = javascript()->ShiftRightLogical(hint);
+      js_op = javascript()->ShiftRightLogical();
       break;
     case Token::ADD:
       js_op = javascript()->Add(hint);
       break;
     case Token::SUB:
-      js_op = javascript()->Subtract(hint);
+      js_op = javascript()->Subtract();
       break;
     case Token::MUL:
-      js_op = javascript()->Multiply(hint);
+      js_op = javascript()->Multiply();
       break;
     case Token::DIV:
-      js_op = javascript()->Divide(hint);
+      js_op = javascript()->Divide();
       break;
     case Token::MOD:
-      js_op = javascript()->Modulus(hint);
+      js_op = javascript()->Modulus();
       break;
     default:
       UNREACHABLE();
@@ -3850,109 +2803,6 @@
   return nullptr;
 }
 
-Node* AstGraphBuilder::TryLoadDynamicVariable(Variable* variable,
-                                              Handle<String> name,
-                                              BailoutId bailout_id,
-                                              const VectorSlotPair& feedback,
-                                              OutputFrameStateCombine combine,
-                                              TypeofMode typeof_mode) {
-  VariableMode mode = variable->mode();
-
-  if (mode == DYNAMIC_GLOBAL) {
-    uint32_t bitset = ComputeBitsetForDynamicGlobal(variable);
-    if (bitset == kFullCheckRequired) return nullptr;
-
-    // We are using two blocks to model fast and slow cases.
-    BlockBuilder fast_block(this);
-    BlockBuilder slow_block(this);
-    environment()->Push(jsgraph()->TheHoleConstant());
-    slow_block.BeginBlock();
-    environment()->Pop();
-    fast_block.BeginBlock();
-
-    // Perform checks whether the fast mode applies, by looking for any
-    // extension object which might shadow the optimistic declaration.
-    for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
-      if ((bitset & 1) == 0) continue;
-      Node* load = NewNode(
-          javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-          current_context());
-      Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
-                  jsgraph()->TheHoleConstant());
-      fast_block.BreakUnless(check, BranchHint::kTrue);
-    }
-
-    // Fast case, because variable is not shadowed.
-    if (Node* constant = TryLoadGlobalConstant(name)) {
-      environment()->Push(constant);
-    } else {
-      // Perform global slot load.
-      Node* fast = BuildGlobalLoad(name, feedback, typeof_mode);
-      PrepareFrameState(fast, bailout_id, combine);
-      environment()->Push(fast);
-    }
-    slow_block.Break();
-    environment()->Pop();
-    fast_block.EndBlock();
-
-    // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    Node* slow = BuildDynamicLoad(name, typeof_mode);
-    PrepareFrameState(slow, bailout_id, combine);
-    environment()->Push(slow);
-    slow_block.EndBlock();
-
-    return environment()->Pop();
-  }
-
-  if (mode == DYNAMIC_LOCAL) {
-    uint32_t bitset = ComputeBitsetForDynamicContext(variable);
-    if (bitset == kFullCheckRequired) return nullptr;
-
-    // We are using two blocks to model fast and slow cases.
-    BlockBuilder fast_block(this);
-    BlockBuilder slow_block(this);
-    environment()->Push(jsgraph()->TheHoleConstant());
-    slow_block.BeginBlock();
-    environment()->Pop();
-    fast_block.BeginBlock();
-
-    // Perform checks whether the fast mode applies, by looking for any
-    // extension object which might shadow the optimistic declaration.
-    for (int depth = 0; bitset != 0; bitset >>= 1, depth++) {
-      if ((bitset & 1) == 0) continue;
-      Node* load = NewNode(
-          javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-          current_context());
-      Node* check =
-          NewNode(javascript()->StrictEqual(CompareOperationHint::kAny), load,
-                  jsgraph()->TheHoleConstant());
-      fast_block.BreakUnless(check, BranchHint::kTrue);
-    }
-
-    // Fast case, because variable is not shadowed. Perform context slot load.
-    Variable* local = variable->local_if_not_shadowed();
-    DCHECK(local->location() == VariableLocation::CONTEXT);  // Must be context.
-    Node* fast =
-        BuildVariableLoad(local, bailout_id, feedback, combine, typeof_mode);
-    environment()->Push(fast);
-    slow_block.Break();
-    environment()->Pop();
-    fast_block.EndBlock();
-
-    // Slow case, because variable potentially shadowed. Perform dynamic lookup.
-    Node* slow = BuildDynamicLoad(name, typeof_mode);
-    PrepareFrameState(slow, bailout_id, combine);
-    environment()->Push(slow);
-    slow_block.EndBlock();
-
-    return environment()->Pop();
-  }
-
-  return nullptr;
-}
-
-
 Node* AstGraphBuilder::TryFastToBoolean(Node* input) {
   switch (input->opcode()) {
     case IrOpcode::kNumberConstant: {
@@ -3983,24 +2833,6 @@
 }
 
 
-Node* AstGraphBuilder::TryFastToName(Node* input) {
-  switch (input->opcode()) {
-    case IrOpcode::kHeapConstant: {
-      Handle<HeapObject> object = HeapObjectMatcher(input).Value();
-      if (object->IsName()) return input;
-      break;
-    }
-    case IrOpcode::kJSToString:
-    case IrOpcode::kJSToName:
-    case IrOpcode::kJSTypeOf:
-      return input;
-    default:
-      break;
-  }
-  return nullptr;
-}
-
-
 bool AstGraphBuilder::CheckOsrEntry(IterationStatement* stmt) {
   if (info()->osr_ast_id() == stmt->OsrEntryId()) {
     DCHECK_EQ(-1, info()->osr_expr_stack_height());
@@ -4073,7 +2905,6 @@
   if (!has_context && !has_frame_state && !has_control && !has_effect) {
     result = graph()->NewNode(op, value_input_count, value_inputs, incomplete);
   } else {
-    bool inside_try_scope = try_nesting_level_ > 0;
     int input_count_with_deps = value_input_count;
     if (has_context) ++input_count_with_deps;
     if (has_frame_state) ++input_count_with_deps;
@@ -4107,18 +2938,6 @@
       if (result->op()->EffectOutputCount() > 0) {
         environment_->UpdateEffectDependency(result);
       }
-      // Add implicit exception continuation for throwing nodes.
-      if (!result->op()->HasProperty(Operator::kNoThrow) && inside_try_scope) {
-        // Copy the environment for the success continuation.
-        Environment* success_env = environment()->CopyForConditional();
-        const Operator* op = common()->IfException();
-        Node* effect = environment()->GetEffectDependency();
-        Node* on_exception = graph()->NewNode(op, effect, result);
-        environment_->UpdateControlDependency(on_exception);
-        environment_->UpdateEffectDependency(on_exception);
-        execution_control()->ThrowValue(on_exception);
-        set_environment(success_env);
-      }
       // Add implicit success continuation for throwing nodes.
       if (!result->op()->HasProperty(Operator::kNoThrow)) {
         const Operator* op = common()->IfSuccess();
@@ -4244,8 +3063,7 @@
   Node* osr_context = effect = contexts()->back();
   int last = static_cast<int>(contexts()->size() - 1);
   for (int i = last - 1; i >= 0; i--) {
-    osr_context = effect =
-        graph->NewNode(load_op, osr_context, osr_context, effect);
+    osr_context = effect = graph->NewNode(load_op, osr_context, effect);
     contexts()->at(i) = osr_context;
   }
   UpdateEffectDependency(effect);
@@ -4364,10 +3182,9 @@
 AstGraphBuilderWithPositions::AstGraphBuilderWithPositions(
     Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
     float invocation_frequency, LoopAssignmentAnalysis* loop_assignment,
-    TypeHintAnalysis* type_hint_analysis, SourcePositionTable* source_positions,
-    int inlining_id)
+    SourcePositionTable* source_positions, int inlining_id)
     : AstGraphBuilder(local_zone, info, jsgraph, invocation_frequency,
-                      loop_assignment, type_hint_analysis),
+                      loop_assignment),
       source_positions_(source_positions),
       start_position_(info->shared_info()->start_position(), inlining_id) {}
 
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
index 2013f50..4fd3f35 100644
--- a/src/compiler/ast-graph-builder.h
+++ b/src/compiler/ast-graph-builder.h
@@ -26,7 +26,6 @@
 class LoopAssignmentAnalysis;
 class LoopBuilder;
 class Node;
-class TypeHintAnalysis;
 
 
 // The AstGraphBuilder produces a high-level IR graph, based on an
@@ -39,8 +38,7 @@
  public:
   AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
                   float invocation_frequency,
-                  LoopAssignmentAnalysis* loop_assignment = nullptr,
-                  TypeHintAnalysis* type_hint_analysis = nullptr);
+                  LoopAssignmentAnalysis* loop_assignment = nullptr);
   virtual ~AstGraphBuilder() {}
 
   // Creates a graph by visiting the entire AST.
@@ -73,8 +71,6 @@
   class ControlScope;
   class ControlScopeForBreakable;
   class ControlScopeForIteration;
-  class ControlScopeForCatch;
-  class ControlScopeForFinally;
   class Environment;
   friend class ControlBuilder;
 
@@ -98,10 +94,6 @@
   // Nodes representing values in the activation record.
   SetOncePointer<Node> function_closure_;
   SetOncePointer<Node> function_context_;
-  SetOncePointer<Node> new_target_;
-
-  // Tracks how many try-blocks are currently entered.
-  int try_nesting_level_;
 
   // Temporary storage for building node input lists.
   int input_buffer_size_;
@@ -119,9 +111,6 @@
   // Result of loop assignment analysis performed before graph creation.
   LoopAssignmentAnalysis* loop_assignment_analysis_;
 
-  // Result of type hint analysis performed before graph creation.
-  TypeHintAnalysis* type_hint_analysis_;
-
   // Cache for StateValues nodes for frame states.
   StateValuesCache state_values_cache_;
 
@@ -171,9 +160,6 @@
   // Get or create the node that represents the incoming function context.
   Node* GetFunctionContext();
 
-  // Get or create the node that represents the incoming new target value.
-  Node* GetNewTarget();
-
   // Get or create the node that represents the empty frame state.
   Node* GetEmptyFrameState();
 
@@ -260,15 +246,10 @@
   Node** EnsureInputBufferSize(int size);
 
   // Named and keyed loads require a VectorSlotPair for successful lowering.
-  VectorSlotPair CreateVectorSlotPair(FeedbackVectorSlot slot) const;
+  VectorSlotPair CreateVectorSlotPair(FeedbackSlot slot) const;
 
-  // Determine which contexts need to be checked for extension objects that
-  // might shadow the optimistic declaration of dynamic lookup variables.
-  uint32_t ComputeBitsetForDynamicGlobal(Variable* variable);
-  uint32_t ComputeBitsetForDynamicContext(Variable* variable);
-
-  // Computes the frequency for JSCallFunction and JSCallConstruct nodes.
-  float ComputeCallFrequency(FeedbackVectorSlot slot) const;
+  // Computes the frequency for JSCall and JSConstruct nodes.
+  float ComputeCallFrequency(FeedbackSlot slot) const;
 
   // ===========================================================================
   // The following build methods all generate graph fragments and return one
@@ -284,15 +265,6 @@
   // Builder to create an arguments object if it is used.
   Node* BuildArgumentsObject(Variable* arguments);
 
-  // Builder to create an array of rest parameters if used.
-  Node* BuildRestArgumentsArray(Variable* rest);
-
-  // Builder that assigns to the {.this_function} internal variable if needed.
-  Node* BuildThisFunctionVariable(Variable* this_function_var);
-
-  // Builder that assigns to the {new.target} internal variable if needed.
-  Node* BuildNewTargetVariable(Variable* new_target_var);
-
   // Builders for variable load and assignment.
   Node* BuildVariableAssignment(Variable* variable, Node* value,
                                 Token::Value op, const VectorSlotPair& slot,
@@ -315,16 +287,8 @@
                         const VectorSlotPair& feedback);
   Node* BuildNamedStore(Node* receiver, Handle<Name> name, Node* value,
                         const VectorSlotPair& feedback);
-
-  // Builders for super property loads and stores.
-  Node* BuildKeyedSuperStore(Node* receiver, Node* home_object, Node* key,
-                             Node* value);
-  Node* BuildNamedSuperStore(Node* receiver, Node* home_object,
-                             Handle<Name> name, Node* value);
-  Node* BuildNamedSuperLoad(Node* receiver, Node* home_object,
-                            Handle<Name> name, const VectorSlotPair& feedback);
-  Node* BuildKeyedSuperLoad(Node* receiver, Node* home_object, Node* key,
-                            const VectorSlotPair& feedback);
+  Node* BuildNamedStoreOwn(Node* receiver, Handle<Name> name, Node* value,
+                           const VectorSlotPair& feedback);
 
   // Builders for global variable loads and stores.
   Node* BuildGlobalLoad(Handle<Name> name, const VectorSlotPair& feedback,
@@ -332,17 +296,12 @@
   Node* BuildGlobalStore(Handle<Name> name, Node* value,
                          const VectorSlotPair& feedback);
 
-  // Builders for dynamic variable loads and stores.
-  Node* BuildDynamicLoad(Handle<Name> name, TypeofMode typeof_mode);
-  Node* BuildDynamicStore(Handle<Name> name, Node* value);
-
   // Builders for accessing the function context.
   Node* BuildLoadGlobalObject();
   Node* BuildLoadNativeContextField(int index);
 
   // Builders for automatic type conversion.
   Node* BuildToBoolean(Node* input, TypeFeedbackId feedback_id);
-  Node* BuildToName(Node* input, BailoutId bailout_id);
   Node* BuildToObject(Node* input, BailoutId bailout_id);
 
   // Builder for adding the [[HomeObject]] to a value if the value came from a
@@ -354,8 +313,6 @@
   Node* BuildThrowError(Node* exception, BailoutId bailout_id);
   Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
   Node* BuildThrowConstAssignError(BailoutId bailout_id);
-  Node* BuildThrowStaticPrototypeError(BailoutId bailout_id);
-  Node* BuildThrowUnsupportedSuperError(BailoutId bailout_id);
 
   // Builders for dynamic hole-checks at runtime.
   Node* BuildHoleCheckThenThrow(Node* value, Variable* var, Node* not_hole,
@@ -363,9 +320,6 @@
   Node* BuildHoleCheckElseThrow(Node* value, Variable* var, Node* for_hole,
                                 BailoutId bailout_id);
 
-  // Builders for conditional errors.
-  Node* BuildThrowIfStaticPrototype(Node* name, BailoutId bailout_id);
-
   // Builders for non-local control flow.
   Node* BuildReturn(Node* return_value);
   Node* BuildThrow(Node* exception_value);
@@ -387,17 +341,8 @@
   // Optimization for variable load from global object.
   Node* TryLoadGlobalConstant(Handle<Name> name);
 
-  // Optimization for variable load of dynamic lookup slot that is most likely
-  // to resolve to a global slot or context slot (inferred from scope chain).
-  Node* TryLoadDynamicVariable(Variable* variable, Handle<String> name,
-                               BailoutId bailout_id,
-                               const VectorSlotPair& feedback,
-                               OutputFrameStateCombine combine,
-                               TypeofMode typeof_mode);
-
   // Optimizations for automatic type conversion.
   Node* TryFastToBoolean(Node* input);
-  Node* TryFastToName(Node* input);
 
   // ===========================================================================
   // The following visitation methods all recursively visit a subtree of the
@@ -408,7 +353,6 @@
 
   // Visit statements.
   void VisitIfNotNull(Statement* stmt);
-  void VisitInScope(Statement* stmt, Scope* scope, Node* context);
 
   // Visit expressions.
   void Visit(Expression* expr);
@@ -449,11 +393,6 @@
   void VisitLiteralCompareTypeof(CompareOperation* expr, Expression* sub_expr,
                                  Handle<String> check);
 
-  // Dispatched from VisitForInStatement.
-  void VisitForInAssignment(Expression* expr, Node* value,
-                            const VectorSlotPair& feedback,
-                            BailoutId bailout_id);
-
   // Dispatched from VisitObjectLiteral.
   void VisitObjectLiteralAccessor(Node* home_object,
                                   ObjectLiteralProperty* property);
@@ -622,7 +561,6 @@
   AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
                                JSGraph* jsgraph, float invocation_frequency,
                                LoopAssignmentAnalysis* loop_assignment,
-                               TypeHintAnalysis* type_hint_analysis,
                                SourcePositionTable* source_positions,
                                int inlining_id = SourcePosition::kNotInlined);
 
diff --git a/src/compiler/ast-loop-assignment-analyzer.cc b/src/compiler/ast-loop-assignment-analyzer.cc
index 82eaeb2..8239e3a 100644
--- a/src/compiler/ast-loop-assignment-analyzer.cc
+++ b/src/compiler/ast-loop-assignment-analyzer.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/ast/scopes.h"
 #include "src/compilation-info.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -201,6 +202,7 @@
 
 void ALAA::VisitEmptyParentheses(EmptyParentheses* e) { UNREACHABLE(); }
 
+void ALAA::VisitGetIterator(GetIterator* e) { UNREACHABLE(); }
 
 void ALAA::VisitCaseClause(CaseClause* cc) {
   if (!cc->is_default()) Visit(cc->label());
diff --git a/src/compiler/branch-elimination.cc b/src/compiler/branch-elimination.cc
index 9b36eb1..2d9a084 100644
--- a/src/compiler/branch-elimination.cc
+++ b/src/compiler/branch-elimination.cc
@@ -18,7 +18,9 @@
       jsgraph_(js_graph),
       node_conditions_(zone, js_graph->graph()->NodeCount()),
       zone_(zone),
-      dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {}
+      dead_(js_graph->graph()->NewNode(js_graph->common()->Dead())) {
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 BranchElimination::~BranchElimination() {}
 
@@ -83,7 +85,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
-  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -103,9 +105,8 @@
       // with the {control} node that already contains the right information.
       ReplaceWithValue(node, dead(), effect, control);
     } else {
-      control =
-          graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
-                           frame_state, effect, control);
+      control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+                                 frame_state, effect, control);
       // TODO(bmeurer): This should be on the AdvancedReducer somehow.
       NodeProperties::MergeControlToEnd(graph(), common(), control);
       Revisit(graph()->end());
@@ -143,20 +144,27 @@
 Reduction BranchElimination::ReduceMerge(Node* node) {
   // Shortcut for the case when we do not know anything about some
   // input.
-  for (int i = 0; i < node->InputCount(); i++) {
-    if (node_conditions_.Get(node->InputAt(i)) == nullptr) {
+  Node::Inputs inputs = node->inputs();
+  for (Node* input : inputs) {
+    if (node_conditions_.Get(input) == nullptr) {
       return UpdateConditions(node, nullptr);
     }
   }
 
-  const ControlPathConditions* first = node_conditions_.Get(node->InputAt(0));
+  auto input_it = inputs.begin();
+
+  DCHECK_GT(inputs.count(), 0);
+
+  const ControlPathConditions* first = node_conditions_.Get(*input_it);
+  ++input_it;
   // Make a copy of the first input's conditions and merge with the conditions
   // from other inputs.
   ControlPathConditions* conditions =
       new (zone_->New(sizeof(ControlPathConditions)))
           ControlPathConditions(*first);
-  for (int i = 1; i < node->InputCount(); i++) {
-    conditions->Merge(*(node_conditions_.Get(node->InputAt(i))));
+  auto input_end = inputs.end();
+  for (; input_it != input_end; ++input_it) {
+    conditions->Merge(*(node_conditions_.Get(*input_it)));
   }
 
   return UpdateConditions(node, conditions);
diff --git a/src/compiler/bytecode-analysis.cc b/src/compiler/bytecode-analysis.cc
new file mode 100644
index 0000000..6d8afe1
--- /dev/null
+++ b/src/compiler/bytecode-analysis.cc
@@ -0,0 +1,621 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-analysis.h"
+
+#include "src/interpreter/bytecode-array-iterator.h"
+#include "src/interpreter/bytecode-array-random-iterator.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+using namespace interpreter;
+
+BytecodeLoopAssignments::BytecodeLoopAssignments(int parameter_count,
+                                                 int register_count, Zone* zone)
+    : parameter_count_(parameter_count),
+      bit_vector_(new (zone)
+                      BitVector(parameter_count + register_count, zone)) {}
+
+void BytecodeLoopAssignments::Add(interpreter::Register r) {
+  if (r.is_parameter()) {
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+  } else {
+    bit_vector_->Add(parameter_count_ + r.index());
+  }
+}
+
+void BytecodeLoopAssignments::AddPair(interpreter::Register r) {
+  if (r.is_parameter()) {
+    DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+  } else {
+    DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+    bit_vector_->Add(parameter_count_ + r.index());
+    bit_vector_->Add(parameter_count_ + r.index() + 1);
+  }
+}
+
+void BytecodeLoopAssignments::AddTriple(interpreter::Register r) {
+  if (r.is_parameter()) {
+    DCHECK(interpreter::Register(r.index() + 1).is_parameter());
+    DCHECK(interpreter::Register(r.index() + 2).is_parameter());
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_));
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 1);
+    bit_vector_->Add(r.ToParameterIndex(parameter_count_) + 2);
+  } else {
+    DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+    DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+    bit_vector_->Add(parameter_count_ + r.index());
+    bit_vector_->Add(parameter_count_ + r.index() + 1);
+    bit_vector_->Add(parameter_count_ + r.index() + 2);
+  }
+}
+
+void BytecodeLoopAssignments::AddAll() { bit_vector_->AddAll(); }
+
+void BytecodeLoopAssignments::Union(const BytecodeLoopAssignments& other) {
+  bit_vector_->Union(*other.bit_vector_);
+}
+
+bool BytecodeLoopAssignments::ContainsParameter(int index) const {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, parameter_count());
+  return bit_vector_->Contains(index);
+}
+
+bool BytecodeLoopAssignments::ContainsLocal(int index) const {
+  DCHECK_GE(index, 0);
+  DCHECK_LT(index, local_count());
+  return bit_vector_->Contains(parameter_count_ + index);
+}
+
+bool BytecodeLoopAssignments::ContainsAccumulator() const {
+  // TODO(leszeks): This assumes the accumulator is always assigned. This is
+  // probably correct, but that assignment is also probably dead, so we should
+  // check liveness.
+  return true;
+}
+
+BytecodeAnalysis::BytecodeAnalysis(Handle<BytecodeArray> bytecode_array,
+                                   Zone* zone, bool do_liveness_analysis)
+    : bytecode_array_(bytecode_array),
+      do_liveness_analysis_(do_liveness_analysis),
+      zone_(zone),
+      loop_stack_(zone),
+      loop_end_index_queue_(zone),
+      end_to_header_(zone),
+      header_to_info_(zone),
+      liveness_map_(bytecode_array->length(), zone) {}
+
+namespace {
+
+void UpdateInLiveness(Bytecode bytecode, BytecodeLivenessState& in_liveness,
+                      const BytecodeArrayAccessor& accessor) {
+  int num_operands = Bytecodes::NumberOfOperands(bytecode);
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+  if (Bytecodes::WritesAccumulator(bytecode)) {
+    in_liveness.MarkAccumulatorDead();
+  }
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kRegOut: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          in_liveness.MarkRegisterDead(r.index());
+        }
+        break;
+      }
+      case OperandType::kRegOutPair: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          in_liveness.MarkRegisterDead(r.index());
+          in_liveness.MarkRegisterDead(r.index() + 1);
+        }
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          DCHECK(!interpreter::Register(r.index() + 2).is_parameter());
+          in_liveness.MarkRegisterDead(r.index());
+          in_liveness.MarkRegisterDead(r.index() + 1);
+          in_liveness.MarkRegisterDead(r.index() + 2);
+        }
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+        break;
+    }
+  }
+
+  if (Bytecodes::ReadsAccumulator(bytecode)) {
+    in_liveness.MarkAccumulatorLive();
+  }
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kReg: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          in_liveness.MarkRegisterLive(r.index());
+        }
+        break;
+      }
+      case OperandType::kRegPair: {
+        interpreter::Register r = accessor.GetRegisterOperand(i);
+        if (!r.is_parameter()) {
+          DCHECK(!interpreter::Register(r.index() + 1).is_parameter());
+          in_liveness.MarkRegisterLive(r.index());
+          in_liveness.MarkRegisterLive(r.index() + 1);
+        }
+        break;
+      }
+      case OperandType::kRegList: {
+        interpreter::Register r = accessor.GetRegisterOperand(i++);
+        uint32_t reg_count = accessor.GetRegisterCountOperand(i);
+        if (!r.is_parameter()) {
+          for (uint32_t j = 0; j < reg_count; ++j) {
+            DCHECK(!interpreter::Register(r.index() + j).is_parameter());
+            in_liveness.MarkRegisterLive(r.index() + j);
+          }
+        }
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterInputOperandType(operand_types[i]));
+        break;
+    }
+  }
+}
+
+void UpdateOutLiveness(Bytecode bytecode, BytecodeLivenessState& out_liveness,
+                       BytecodeLivenessState* next_bytecode_in_liveness,
+                       const BytecodeArrayAccessor& accessor,
+                       const BytecodeLivenessMap& liveness_map) {
+  int current_offset = accessor.current_offset();
+  const Handle<BytecodeArray>& bytecode_array = accessor.bytecode_array();
+
+  // Update from jump target (if any). Skip loops; we update these manually in
+  // the liveness iterations.
+  if (Bytecodes::IsForwardJump(bytecode)) {
+    int target_offset = accessor.GetJumpTargetOffset();
+    out_liveness.Union(*liveness_map.GetInLiveness(target_offset));
+  }
+
+  // Update from next bytecode (unless there isn't one or this is an
+  // unconditional jump).
+  if (next_bytecode_in_liveness != nullptr &&
+      !Bytecodes::IsUnconditionalJump(bytecode)) {
+    out_liveness.Union(*next_bytecode_in_liveness);
+  }
+
+  // Update from exception handler (if any).
+  if (!interpreter::Bytecodes::IsWithoutExternalSideEffects(bytecode)) {
+    int handler_context;
+    // TODO(leszeks): We should look up this range only once per entry.
+    HandlerTable* table = HandlerTable::cast(bytecode_array->handler_table());
+    int handler_offset =
+        table->LookupRange(current_offset, &handler_context, nullptr);
+
+    if (handler_offset != -1) {
+      out_liveness.Union(*liveness_map.GetInLiveness(handler_offset));
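+      // The handler restores the context from the {handler_context} register,
+      // so that register must be live whenever the handler can be entered.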
+      out_liveness.MarkRegisterLive(handler_context);
+    }
+  }
+}
+
+void UpdateAssignments(Bytecode bytecode, BytecodeLoopAssignments& assignments,
+                       const BytecodeArrayAccessor& accessor) {
+  int num_operands = Bytecodes::NumberOfOperands(bytecode);
+  const OperandType* operand_types = Bytecodes::GetOperandTypes(bytecode);
+
+  for (int i = 0; i < num_operands; ++i) {
+    switch (operand_types[i]) {
+      case OperandType::kRegOut: {
+        assignments.Add(accessor.GetRegisterOperand(i));
+        break;
+      }
+      case OperandType::kRegOutPair: {
+        assignments.AddPair(accessor.GetRegisterOperand(i));
+        break;
+      }
+      case OperandType::kRegOutTriple: {
+        assignments.AddTriple(accessor.GetRegisterOperand(i));
+        break;
+      }
+      default:
+        DCHECK(!Bytecodes::IsRegisterOutputOperandType(operand_types[i]));
+        break;
+    }
+  }
+}
+
+}  // namespace
+
+void BytecodeAnalysis::Analyze(BailoutId osr_bailout_id) {
+  loop_stack_.push({-1, nullptr});
+
+  BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+  int osr_loop_end_offset =
+      osr_bailout_id.IsNone() ? -1 : osr_bailout_id.ToInt();
+
+  BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+  for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+    Bytecode bytecode = iterator.current_bytecode();
+    int current_offset = iterator.current_offset();
+
+    if (bytecode == Bytecode::kJumpLoop) {
+      // Every byte up to and including the last byte within the backwards jump
+      // instruction is considered part of the loop, so set the loop end
+      // accordingly.
+      int loop_end = current_offset + iterator.current_bytecode_size();
+      PushLoop(iterator.GetJumpTargetOffset(), loop_end);
+
+      // Normally prefixed bytecodes are treated as if the prefix's offset was
+      // the actual bytecode's offset. However, the OSR id is the offset of the
+      // actual JumpLoop bytecode, so we need to find the location of that
+      // bytecode ignoring the prefix.
+      int jump_loop_offset = current_offset + iterator.current_prefix_offset();
+      bool is_osr_loop = (jump_loop_offset == osr_loop_end_offset);
+
+      // Check that is_osr_loop is set iff the osr_loop_end_offset is within
+      // this bytecode.
+      DCHECK(!is_osr_loop ||
+             iterator.OffsetWithinBytecode(osr_loop_end_offset));
+
+      // OSR "assigns" everything to OSR values on entry into an OSR loop, so we
+      // need to make sure to consider everything to be assigned.
+      if (is_osr_loop) {
+        loop_stack_.top().loop_info->assignments().AddAll();
+      }
+
+      // Save the index so that we can do another pass later.
+      if (do_liveness_analysis_) {
+        loop_end_index_queue_.push_back(iterator.current_index());
+      }
+    } else if (loop_stack_.size() > 1) {
+      LoopStackEntry& current_loop = loop_stack_.top();
+      LoopInfo* current_loop_info = current_loop.loop_info;
+
+      // TODO(leszeks): Ideally, we'd only set values that were assigned in
+      // the loop *and* are live when the loop exits. However, this requires
+      // tracking the out-liveness of *all* loop exits, which is not
+      // information we currently have.
+      UpdateAssignments(bytecode, current_loop_info->assignments(), iterator);
+
+      if (current_offset == current_loop.header_offset) {
+        loop_stack_.pop();
+        if (loop_stack_.size() > 1) {
+          // Propagate inner loop assignments to outer loop.
+          loop_stack_.top().loop_info->assignments().Union(
+              current_loop_info->assignments());
+        }
+      }
+    }
+
+    if (do_liveness_analysis_) {
+      BytecodeLiveness& liveness = liveness_map_.InitializeLiveness(
+          current_offset, bytecode_array()->register_count(), zone());
+
+      UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                        iterator, liveness_map_);
+      liveness.in->CopyFrom(*liveness.out);
+      UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+      next_bytecode_in_liveness = liveness.in;
+    }
+  }
+
+  DCHECK_EQ(loop_stack_.size(), 1u);
+  DCHECK_EQ(loop_stack_.top().header_offset, -1);
+
+  if (!do_liveness_analysis_) return;
+
+  // At this point, every bytecode has a valid in and out liveness, except for
+  // propagating liveness across back edges (i.e. JumpLoop). Subsequent liveness
+  // analysis iterations can only add additional liveness bits that are pulled
+  // across these back edges.
+  //
+  // Furthermore, a loop header's in-liveness can only change based on any
+  // bytecodes *after* the loop end -- it cannot change as a result of the
+  // JumpLoop liveness being updated, as the only liveness bits that can be
+  // added to the loop body are those of the loop header.
+  //
+  // So, if we know that the liveness of bytecodes after a loop header won't
+  // change (e.g. because there are no loops in them, or we have already ensured
+  // those loops are valid), we can safely update the loop end and pass over the
+  // loop body, and then never have to pass over that loop end again, because we
+  // have shown that its target, the loop header, can't change from the entries
+  // after the loop, and can't change from any loop body pass.
+  //
+  // This means that in a pass, we can iterate backwards over the bytecode
+  // array, process any loops that we encounter, and on subsequent passes we can
+  // skip processing those loops (though we still have to process inner loops).
+  //
+  // Equivalently, we can queue up loop ends from back to front, and pass over
+  // the loops in that order, as this preserves both the bottom-to-top and
+  // outer-to-inner requirements.
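+  //
+  // For example, with a nested loop
+  //
+  //    0 : <loop header A>
+  //    5 :   <loop header B>
+  //   10 :   JumpLoop -> 5   (end of B)
+  //   15 : JumpLoop -> 0     (end of A)
+  //
+  // the backward pass above pushes A's end (offset 15) before B's end
+  // (offset 10), so the queue below processes the outer loop first; its body
+  // walk covers B's bytecodes before B's own end is revisited.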
+
+  for (int loop_end_index : loop_end_index_queue_) {
+    iterator.GoToIndex(loop_end_index);
+
+    DCHECK_EQ(iterator.current_bytecode(), Bytecode::kJumpLoop);
+
+    int header_offset = iterator.GetJumpTargetOffset();
+    int end_offset = iterator.current_offset();
+
+    BytecodeLiveness& header_liveness =
+        liveness_map_.GetLiveness(header_offset);
+    BytecodeLiveness& end_liveness = liveness_map_.GetLiveness(end_offset);
+
+    if (!end_liveness.out->UnionIsChanged(*header_liveness.in)) {
+      // Only update the loop body if the loop end liveness changed.
+      continue;
+    }
+    end_liveness.in->CopyFrom(*end_liveness.out);
+    next_bytecode_in_liveness = end_liveness.in;
+
+    // Advance into the loop body.
+    --iterator;
+    for (; iterator.current_offset() > header_offset; --iterator) {
+      Bytecode bytecode = iterator.current_bytecode();
+
+      int current_offset = iterator.current_offset();
+      BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+      UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                        iterator, liveness_map_);
+      liveness.in->CopyFrom(*liveness.out);
+      UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+      next_bytecode_in_liveness = liveness.in;
+    }
+    // Now we are at the loop header. Since the in-liveness of the header
+    // can't change, we need only to update the out-liveness.
+    UpdateOutLiveness(iterator.current_bytecode(), *header_liveness.out,
+                      next_bytecode_in_liveness, iterator, liveness_map_);
+  }
+
+  DCHECK(LivenessIsValid());
+}
+
+void BytecodeAnalysis::PushLoop(int loop_header, int loop_end) {
+  DCHECK(loop_header < loop_end);
+  DCHECK(loop_stack_.top().header_offset < loop_header);
+  DCHECK(end_to_header_.find(loop_end) == end_to_header_.end());
+  DCHECK(header_to_info_.find(loop_header) == header_to_info_.end());
+
+  int parent_offset = loop_stack_.top().header_offset;
+
+  end_to_header_.insert({loop_end, loop_header});
+  auto it = header_to_info_.insert(
+      {loop_header, LoopInfo(parent_offset, bytecode_array_->parameter_count(),
+                             bytecode_array_->register_count(), zone_)});
+  // Get the loop info pointer from the output of insert.
+  LoopInfo* loop_info = &it.first->second;
+
+  loop_stack_.push({loop_header, loop_info});
+}
+
+bool BytecodeAnalysis::IsLoopHeader(int offset) const {
+  return header_to_info_.find(offset) != header_to_info_.end();
+}
+
+int BytecodeAnalysis::GetLoopOffsetFor(int offset) const {
+  auto loop_end_to_header = end_to_header_.upper_bound(offset);
+  // If there is no next end => offset is not in a loop.
+  if (loop_end_to_header == end_to_header_.end()) {
+    return -1;
+  }
+  // If the header precedes the offset, this is the loop
+  //
+  //   .> header  <--loop_end_to_header
+  //   |
+  //   |  <--offset
+  //   |
+  //   `- end
+  if (loop_end_to_header->second <= offset) {
+    return loop_end_to_header->second;
+  }
+  // Otherwise there is a (potentially nested) loop after this offset.
+  //
+  //    <--offset
+  //
+  //   .> header
+  //   |
+  //   | .> header  <--loop_end_to_header
+  //   | |
+  //   | `- end
+  //   |
+  //   `- end
+  // We just return the parent of the next loop (might be -1).
+  DCHECK(header_to_info_.upper_bound(offset) != header_to_info_.end());
+
+  return header_to_info_.upper_bound(offset)->second.parent_offset();
+}
+
+const LoopInfo& BytecodeAnalysis::GetLoopInfoFor(int header_offset) const {
+  DCHECK(IsLoopHeader(header_offset));
+
+  return header_to_info_.find(header_offset)->second;
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetInLivenessFor(
+    int offset) const {
+  if (!do_liveness_analysis_) return nullptr;
+
+  return liveness_map_.GetInLiveness(offset);
+}
+
+const BytecodeLivenessState* BytecodeAnalysis::GetOutLivenessFor(
+    int offset) const {
+  if (!do_liveness_analysis_) return nullptr;
+
+  return liveness_map_.GetOutLiveness(offset);
+}
+
+std::ostream& BytecodeAnalysis::PrintLivenessTo(std::ostream& os) const {
+  interpreter::BytecodeArrayIterator iterator(bytecode_array());
+
+  for (; !iterator.done(); iterator.Advance()) {
+    int current_offset = iterator.current_offset();
+
+    const BitVector& in_liveness =
+        GetInLivenessFor(current_offset)->bit_vector();
+    const BitVector& out_liveness =
+        GetOutLivenessFor(current_offset)->bit_vector();
+
+    for (int i = 0; i < in_liveness.length(); ++i) {
+      os << (in_liveness.Contains(i) ? "L" : ".");
+    }
+    os << " -> ";
+
+    for (int i = 0; i < out_liveness.length(); ++i) {
+      os << (out_liveness.Contains(i) ? "L" : ".");
+    }
+
+    os << " | " << current_offset << ": ";
+    iterator.PrintTo(os) << std::endl;
+  }
+
+  return os;
+}
+
+#if DEBUG
+bool BytecodeAnalysis::LivenessIsValid() {
+  BytecodeArrayRandomIterator iterator(bytecode_array(), zone());
+
+  BytecodeLivenessState previous_liveness(bytecode_array()->register_count(),
+                                          zone());
+
+  int invalid_offset = -1;
+  int which_invalid = -1;
+
+  BytecodeLivenessState* next_bytecode_in_liveness = nullptr;
+
+  // Ensure that there are no liveness changes if we iterate one more time.
+  for (iterator.GoToEnd(); iterator.IsValid(); --iterator) {
+    Bytecode bytecode = iterator.current_bytecode();
+
+    int current_offset = iterator.current_offset();
+
+    BytecodeLiveness& liveness = liveness_map_.GetLiveness(current_offset);
+
+    previous_liveness.CopyFrom(*liveness.out);
+
+    UpdateOutLiveness(bytecode, *liveness.out, next_bytecode_in_liveness,
+                      iterator, liveness_map_);
+    // UpdateOutLiveness skips kJumpLoop, so we update it manually.
+    if (bytecode == Bytecode::kJumpLoop) {
+      int target_offset = iterator.GetJumpTargetOffset();
+      liveness.out->Union(*liveness_map_.GetInLiveness(target_offset));
+    }
+
+    if (!liveness.out->Equals(previous_liveness)) {
+      // Reset the invalid liveness.
+      liveness.out->CopyFrom(previous_liveness);
+      invalid_offset = current_offset;
+      which_invalid = 1;
+      break;
+    }
+
+    previous_liveness.CopyFrom(*liveness.in);
+
+    liveness.in->CopyFrom(*liveness.out);
+    UpdateInLiveness(bytecode, *liveness.in, iterator);
+
+    if (!liveness.in->Equals(previous_liveness)) {
+      // Reset the invalid liveness.
+      liveness.in->CopyFrom(previous_liveness);
+      invalid_offset = current_offset;
+      which_invalid = 0;
+      break;
+    }
+
+    next_bytecode_in_liveness = liveness.in;
+  }
+
+  if (invalid_offset != -1) {
+    OFStream of(stderr);
+    of << "Invalid liveness:" << std::endl;
+
+    // Dump the bytecode, annotated with liveness and with loops marked.
+
+    int loop_indent = 0;
+
+    BytecodeArrayIterator forward_iterator(bytecode_array());
+    for (; !forward_iterator.done(); forward_iterator.Advance()) {
+      int current_offset = forward_iterator.current_offset();
+      const BitVector& in_liveness =
+          GetInLivenessFor(current_offset)->bit_vector();
+      const BitVector& out_liveness =
+          GetOutLivenessFor(current_offset)->bit_vector();
+
+      for (int i = 0; i < in_liveness.length(); ++i) {
+        of << (in_liveness.Contains(i) ? 'L' : '.');
+      }
+
+      of << " | ";
+
+      for (int i = 0; i < out_liveness.length(); ++i) {
+        of << (out_liveness.Contains(i) ? 'L' : '.');
+      }
+
+      of << " : " << current_offset << " : ";
+
+      // Draw loop back edges by indenting everything between loop headers and
+      // jump loop instructions.
+      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+        loop_indent--;
+      }
+      for (int i = 0; i < loop_indent; ++i) {
+        of << " | ";
+      }
+      if (forward_iterator.current_bytecode() == Bytecode::kJumpLoop) {
+        of << " `-" << current_offset;
+      } else if (IsLoopHeader(current_offset)) {
+        of << " .>" << current_offset;
+        loop_indent++;
+      }
+      forward_iterator.PrintTo(of) << std::endl;
+
+      if (current_offset == invalid_offset) {
+        // Underline the invalid liveness.
+        if (which_invalid == 0) {
+          for (int i = 0; i < in_liveness.length(); ++i) {
+            of << '^';
+          }
+        } else {
+          for (int i = 0; i < in_liveness.length() + 3; ++i) {
+            of << ' ';
+          }
+          for (int i = 0; i < out_liveness.length(); ++i) {
+            of << '^';
+          }
+        }
+
+        // Make sure to draw the loop indentation marks on this additional line.
+        of << " : " << current_offset << " : ";
+        for (int i = 0; i < loop_indent; ++i) {
+          of << " | ";
+        }
+
+        of << std::endl;
+      }
+    }
+  }
+
+  return invalid_offset == -1;
+}
+#endif
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/bytecode-analysis.h b/src/compiler/bytecode-analysis.h
new file mode 100644
index 0000000..ad93f8a
--- /dev/null
+++ b/src/compiler/bytecode-analysis.h
@@ -0,0 +1,126 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_ANALYSIS_H_
+#define V8_COMPILER_BYTECODE_ANALYSIS_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/compiler/bytecode-liveness-map.h"
+#include "src/handles.h"
+#include "src/interpreter/bytecode-register.h"
+#include "src/zone/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+
+class BytecodeArray;
+
+namespace compiler {
+
+class V8_EXPORT_PRIVATE BytecodeLoopAssignments {
+ public:
+  BytecodeLoopAssignments(int parameter_count, int register_count, Zone* zone);
+
+  void Add(interpreter::Register r);
+  void AddPair(interpreter::Register r);
+  void AddTriple(interpreter::Register r);
+  void AddAll();
+  void Union(const BytecodeLoopAssignments& other);
+
+  bool ContainsParameter(int index) const;
+  bool ContainsLocal(int index) const;
+  bool ContainsAccumulator() const;
+
+  int parameter_count() const { return parameter_count_; }
+  int local_count() const { return bit_vector_->length() - parameter_count_; }
+
+ private:
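+  // Bit layout (see Add() in the .cc file): bit i, for i < parameter_count_,
+  // tracks parameter i; bit parameter_count_ + j tracks local register j.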
+  int parameter_count_;
+  BitVector* bit_vector_;
+};
+
+struct V8_EXPORT_PRIVATE LoopInfo {
+ public:
+  LoopInfo(int parent_offset, int parameter_count, int register_count,
+           Zone* zone)
+      : parent_offset_(parent_offset),
+        assignments_(parameter_count, register_count, zone) {}
+
+  int parent_offset() const { return parent_offset_; }
+
+  BytecodeLoopAssignments& assignments() { return assignments_; }
+  const BytecodeLoopAssignments& assignments() const { return assignments_; }
+
+ private:
+  // The offset to the parent loop, or -1 if there is no parent.
+  int parent_offset_;
+  BytecodeLoopAssignments assignments_;
+};
+
+class V8_EXPORT_PRIVATE BytecodeAnalysis BASE_EMBEDDED {
+ public:
+  BytecodeAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone,
+                   bool do_liveness_analysis);
+
+  // Analyze the bytecodes to find the loop ranges, loop nesting, loop
+  // assignments and liveness, under the assumption that there is an OSR bailout
+  // at {osr_bailout_id}.
+  //
+  // No other methods in this class return valid information until this has been
+  // called.
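+  //
+  // Usage sketch:
+  //
+  //   BytecodeAnalysis analysis(bytecode_array, zone, true);
+  //   analysis.Analyze(osr_bailout_id);
+  //   if (analysis.IsLoopHeader(offset)) {
+  //     const LoopInfo& info = analysis.GetLoopInfoFor(offset);
+  //   }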
+  void Analyze(BailoutId osr_bailout_id);
+
+  // Returns true if the given offset is a loop header.
+  bool IsLoopHeader(int offset) const;
+  // Get the loop header offset of the containing loop for arbitrary
+  // {offset}, or -1 if the {offset} is not inside any loop.
+  int GetLoopOffsetFor(int offset) const;
+  // Get the loop info of the loop header at {header_offset}.
+  const LoopInfo& GetLoopInfoFor(int header_offset) const;
+
+  // Gets the in-liveness for the bytecode at {offset}.
+  const BytecodeLivenessState* GetInLivenessFor(int offset) const;
+
+  // Gets the out-liveness for the bytecode at {offset}.
+  const BytecodeLivenessState* GetOutLivenessFor(int offset) const;
+
+  std::ostream& PrintLivenessTo(std::ostream& os) const;
+
+ private:
+  struct LoopStackEntry {
+    int header_offset;
+    LoopInfo* loop_info;
+  };
+
+  void PushLoop(int loop_header, int loop_end);
+
+#if DEBUG
+  bool LivenessIsValid();
+#endif
+
+  Zone* zone() const { return zone_; }
+  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
+
+ private:
+  Handle<BytecodeArray> bytecode_array_;
+  bool do_liveness_analysis_;
+  Zone* zone_;
+
+  ZoneStack<LoopStackEntry> loop_stack_;
+  ZoneVector<int> loop_end_index_queue_;
+
+  ZoneMap<int, int> end_to_header_;
+  ZoneMap<int, LoopInfo> header_to_info_;
+
+  BytecodeLivenessMap liveness_map_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeAnalysis);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BYTECODE_ANALYSIS_H_
diff --git a/src/compiler/bytecode-branch-analysis.cc b/src/compiler/bytecode-branch-analysis.cc
deleted file mode 100644
index 4e96a53..0000000
--- a/src/compiler/bytecode-branch-analysis.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-branch-analysis.h"
-
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeBranchAnalysis::BytecodeBranchAnalysis(
-    Handle<BytecodeArray> bytecode_array, Zone* zone)
-    : bytecode_array_(bytecode_array),
-      is_backward_target_(bytecode_array->length(), zone),
-      is_forward_target_(bytecode_array->length(), zone),
-      zone_(zone) {}
-
-void BytecodeBranchAnalysis::Analyze() {
-  interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  while (!iterator.done()) {
-    interpreter::Bytecode bytecode = iterator.current_bytecode();
-    int current_offset = iterator.current_offset();
-    if (interpreter::Bytecodes::IsJump(bytecode)) {
-      AddBranch(current_offset, iterator.GetJumpTargetOffset());
-    }
-    iterator.Advance();
-  }
-}
-
-void BytecodeBranchAnalysis::AddBranch(int source_offset, int target_offset) {
-  if (source_offset < target_offset) {
-    is_forward_target_.Add(target_offset);
-  } else {
-    is_backward_target_.Add(target_offset);
-  }
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/bytecode-branch-analysis.h b/src/compiler/bytecode-branch-analysis.h
deleted file mode 100644
index 7d32da8..0000000
--- a/src/compiler/bytecode-branch-analysis.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
-
-#include "src/bit-vector.h"
-#include "src/handles.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-// A class for identifying branch targets within a bytecode array.
-// This information can be used to construct the local control flow
-// logic for high-level IR graphs built from bytecode.
-//
-// N.B. If this class is used to determine loop headers, then such a
-// usage relies on the only backwards branches in bytecode being jumps
-// back to loop headers.
-class BytecodeBranchAnalysis BASE_EMBEDDED {
- public:
-  BytecodeBranchAnalysis(Handle<BytecodeArray> bytecode_array, Zone* zone);
-
-  // Analyze the bytecodes to find the branch sites and their
-  // targets. No other methods in this class return valid information
-  // until this has been called.
-  void Analyze();
-
-  // Returns true if there are any forward branches to the bytecode at
-  // |offset|.
-  bool forward_branches_target(int offset) const {
-    return is_forward_target_.Contains(offset);
-  }
-
-  // Returns true if there are any backward branches to the bytecode
-  // at |offset|.
-  bool backward_branches_target(int offset) const {
-    return is_backward_target_.Contains(offset);
-  }
-
- private:
-  void AddBranch(int origin_offset, int target_offset);
-
-  Zone* zone() const { return zone_; }
-  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
-  Handle<BytecodeArray> bytecode_array_;
-  BitVector is_backward_target_;
-  BitVector is_forward_target_;
-  Zone* zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeBranchAnalysis);
-};
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_BYTECODE_BRANCH_ANALYSIS_H_
diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc
index 34b50df..aaeee66 100644
--- a/src/compiler/bytecode-graph-builder.cc
+++ b/src/compiler/bytecode-graph-builder.cc
@@ -7,12 +7,14 @@
 #include "src/ast/ast.h"
 #include "src/ast/scopes.h"
 #include "src/compilation-info.h"
-#include "src/compiler/bytecode-branch-analysis.h"
 #include "src/compiler/compiler-source-position-table.h"
+#include "src/compiler/js-type-hint-lowering.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/interpreter/bytecodes.h"
 #include "src/objects-inl.h"
+#include "src/objects/literal-objects.h"
 
 namespace v8 {
 namespace internal {
@@ -36,7 +38,6 @@
 
   Node* LookupAccumulator() const;
   Node* LookupRegister(interpreter::Register the_register) const;
-  void MarkAllRegistersLive();
 
   void BindAccumulator(Node* node,
                        FrameStateAttachmentMode mode = kDontAttachFrameState);
@@ -57,7 +58,8 @@
   // Preserve a checkpoint of the environment for the IR graph. Any
   // further mutation of the environment will not affect checkpoints.
   Node* Checkpoint(BailoutId bytecode_offset, OutputFrameStateCombine combine,
-                   bool owner_has_exception);
+                   bool owner_has_exception,
+                   const BytecodeLivenessState* liveness);
 
   // Control dependency tracked by this environment.
   Node* GetControlDependency() const { return control_dependency_; }
@@ -68,30 +70,29 @@
   Node* Context() const { return context_; }
   void SetContext(Node* new_context) { context_ = new_context; }
 
-  Environment* CopyForConditional();
-  Environment* CopyForLoop();
-  Environment* CopyForOsrEntry();
+  Environment* Copy();
   void Merge(Environment* other);
-  void PrepareForOsrEntry();
 
-  void PrepareForLoopExit(Node* loop);
+  void PrepareForOsrEntry();
+  void PrepareForLoop(const BytecodeLoopAssignments& assignments);
+  void PrepareForLoopExit(Node* loop,
+                          const BytecodeLoopAssignments& assignments);
 
  private:
-  Environment(const Environment* copy, LivenessAnalyzerBlock* liveness_block);
-  void PrepareForLoop();
+  explicit Environment(const Environment* copy);
 
-  bool StateValuesRequireUpdate(Node** state_values, int offset, int count);
-  void UpdateStateValues(Node** state_values, int offset, int count);
+  bool StateValuesRequireUpdate(Node** state_values, Node** values, int count);
+  void UpdateStateValues(Node** state_values, Node** values, int count);
+  void UpdateStateValuesWithCache(Node** state_values, Node** values, int count,
+                                  const BitVector* liveness,
+                                  int liveness_offset);
 
   int RegisterToValuesIndex(interpreter::Register the_register) const;
 
-  bool IsLivenessBlockConsistent() const;
-
   Zone* zone() const { return builder_->local_zone(); }
   Graph* graph() const { return builder_->graph(); }
   CommonOperatorBuilder* common() const { return builder_->common(); }
   BytecodeGraphBuilder* builder() const { return builder_; }
-  LivenessAnalyzerBlock* liveness_block() const { return liveness_block_; }
   const NodeVector* values() const { return &values_; }
   NodeVector* values() { return &values_; }
   int register_base() const { return register_base_; }
@@ -100,7 +101,6 @@
   BytecodeGraphBuilder* builder_;
   int register_count_;
   int parameter_count_;
-  LivenessAnalyzerBlock* liveness_block_;
   Node* context_;
   Node* control_dependency_;
   Node* effect_dependency_;
@@ -124,9 +124,6 @@
     : builder_(builder),
       register_count_(register_count),
       parameter_count_(parameter_count),
-      liveness_block_(builder->is_liveness_analysis_enabled_
-                          ? builder_->liveness_analyzer()->NewBlock()
-                          : nullptr),
       context_(context),
       control_dependency_(control_dependency),
       effect_dependency_(control_dependency),
@@ -161,12 +158,10 @@
 }
 
 BytecodeGraphBuilder::Environment::Environment(
-    const BytecodeGraphBuilder::Environment* other,
-    LivenessAnalyzerBlock* liveness_block)
+    const BytecodeGraphBuilder::Environment* other)
     : builder_(other->builder_),
       register_count_(other->register_count_),
       parameter_count_(other->parameter_count_),
-      liveness_block_(liveness_block),
       context_(other->context_),
       control_dependency_(other->control_dependency_),
       effect_dependency_(other->effect_dependency_),
@@ -189,16 +184,7 @@
   }
 }
 
-bool BytecodeGraphBuilder::Environment::IsLivenessBlockConsistent() const {
-  return !builder_->IsLivenessAnalysisEnabled() ==
-         (liveness_block() == nullptr);
-}
-
 Node* BytecodeGraphBuilder::Environment::LookupAccumulator() const {
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    liveness_block()->LookupAccumulator();
-  }
   return values()->at(accumulator_base_);
 }
 
@@ -213,32 +199,15 @@
     return builder()->GetNewTarget();
   } else {
     int values_index = RegisterToValuesIndex(the_register);
-    if (liveness_block() != nullptr && !the_register.is_parameter()) {
-      DCHECK(IsLivenessBlockConsistent());
-      liveness_block()->Lookup(the_register.index());
-    }
     return values()->at(values_index);
   }
 }
 
-void BytecodeGraphBuilder::Environment::MarkAllRegistersLive() {
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    for (int i = 0; i < register_count(); ++i) {
-      liveness_block()->Lookup(i);
-    }
-  }
-}
-
 void BytecodeGraphBuilder::Environment::BindAccumulator(
     Node* node, FrameStateAttachmentMode mode) {
   if (mode == FrameStateAttachmentMode::kAttachFrameState) {
     builder()->PrepareFrameState(node, OutputFrameStateCombine::PokeAt(0));
   }
-  DCHECK(IsLivenessBlockConsistent());
-  if (liveness_block() != nullptr) {
-    liveness_block()->BindAccumulator();
-  }
   values()->at(accumulator_base_) = node;
 }
 
@@ -251,10 +220,6 @@
                                            accumulator_base_ - values_index));
   }
   values()->at(values_index) = node;
-  if (liveness_block() != nullptr && !the_register.is_parameter()) {
-    DCHECK(IsLivenessBlockConsistent());
-    liveness_block()->Bind(the_register.index());
-  }
 }
 
 void BytecodeGraphBuilder::Environment::BindRegistersToProjections(
@@ -278,45 +243,13 @@
   }
 }
 
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForLoop() {
-  PrepareForLoop();
-  if (liveness_block() != nullptr) {
-    // Finish the current block before copying.
-    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
-  }
-  return new (zone()) Environment(this, liveness_block());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForOsrEntry() {
-  return new (zone())
-      Environment(this, builder_->liveness_analyzer()->NewBlock());
-}
-
-BytecodeGraphBuilder::Environment*
-BytecodeGraphBuilder::Environment::CopyForConditional() {
-  LivenessAnalyzerBlock* copy_liveness_block = nullptr;
-  if (liveness_block() != nullptr) {
-    copy_liveness_block =
-        builder_->liveness_analyzer()->NewBlock(liveness_block());
-    liveness_block_ = builder_->liveness_analyzer()->NewBlock(liveness_block());
-  }
-  return new (zone()) Environment(this, copy_liveness_block);
+BytecodeGraphBuilder::Environment* BytecodeGraphBuilder::Environment::Copy() {
+  return new (zone()) Environment(this);
 }
 
 
 void BytecodeGraphBuilder::Environment::Merge(
     BytecodeGraphBuilder::Environment* other) {
-  if (builder_->is_liveness_analysis_enabled_) {
-    if (GetControlDependency()->opcode() != IrOpcode::kLoop) {
-      liveness_block_ =
-          builder()->liveness_analyzer()->NewBlock(liveness_block());
-    }
-    liveness_block()->AddPredecessor(other->liveness_block());
-  }
-
   // Create a merge of the control dependencies of both environments and update
   // the current environment's control dependency accordingly.
   Node* control = builder()->MergeControl(GetControlDependency(),
@@ -337,8 +270,8 @@
   }
 }
 
-
-void BytecodeGraphBuilder::Environment::PrepareForLoop() {
+void BytecodeGraphBuilder::Environment::PrepareForLoop(
+    const BytecodeLoopAssignments& assignments) {
   // Create a control node for the loop header.
   Node* control = builder()->NewLoop();
 
@@ -346,11 +279,23 @@
   Node* effect = builder()->NewEffectPhi(1, GetEffectDependency(), control);
   UpdateEffectDependency(effect);
 
-  // Assume everything in the loop is updated.
+  // Create Phis for any values that may be updated by the end of the loop.
   context_ = builder()->NewPhi(1, context_, control);
-  int size = static_cast<int>(values()->size());
-  for (int i = 0; i < size; i++) {
-    values()->at(i) = builder()->NewPhi(1, values()->at(i), control);
+  for (int i = 0; i < parameter_count(); i++) {
+    if (assignments.ContainsParameter(i)) {
+      values_[i] = builder()->NewPhi(1, values_[i], control);
+    }
+  }
+  for (int i = 0; i < register_count(); i++) {
+    if (assignments.ContainsLocal(i)) {
+      int index = register_base() + i;
+      values_[index] = builder()->NewPhi(1, values_[index], control);
+    }
+  }
+
+  if (assignments.ContainsAccumulator()) {
+    values_[accumulator_base()] =
+        builder()->NewPhi(1, values_[accumulator_base()], control);
   }
 
   // Connect to the loop end.
@@ -384,7 +329,7 @@
 
   BailoutId loop_id(builder_->bytecode_iterator().current_offset());
   Node* frame_state =
-      Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false);
+      Checkpoint(loop_id, OutputFrameStateCombine::Ignore(), false, nullptr);
   Node* checkpoint =
       graph()->NewNode(common()->Checkpoint(), frame_state, entry, entry);
   UpdateEffectDependency(checkpoint);
@@ -402,22 +347,22 @@
 }
 
 bool BytecodeGraphBuilder::Environment::StateValuesRequireUpdate(
-    Node** state_values, int offset, int count) {
+    Node** state_values, Node** values, int count) {
   if (*state_values == nullptr) {
     return true;
   }
-  DCHECK_EQ((*state_values)->InputCount(), count);
-  DCHECK_LE(static_cast<size_t>(offset + count), values()->size());
-  Node** env_values = (count == 0) ? nullptr : &values()->at(offset);
+  Node::Inputs inputs = (*state_values)->inputs();
+  if (inputs.count() != count) return true;
   for (int i = 0; i < count; i++) {
-    if ((*state_values)->InputAt(i) != env_values[i]) {
+    if (inputs[i] != values[i]) {
       return true;
     }
   }
   return false;
 }
 
-void BytecodeGraphBuilder::Environment::PrepareForLoopExit(Node* loop) {
+void BytecodeGraphBuilder::Environment::PrepareForLoopExit(
+    Node* loop, const BytecodeLoopAssignments& assignments) {
   DCHECK_EQ(loop->opcode(), IrOpcode::kLoop);
 
   Node* control = GetControlDependency();
@@ -431,34 +376,80 @@
                                          GetEffectDependency(), loop_exit);
   UpdateEffectDependency(effect_rename);
 
-  // TODO(jarin) We should also rename context here. However, uncoditional
+  // TODO(jarin) We should also rename context here. However, unconditional
   // renaming confuses global object and native context specialization.
   // We should only rename if the context is assigned in the loop.
 
-  // Rename the environmnent values.
-  for (size_t i = 0; i < values_.size(); i++) {
-    Node* rename =
-        graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
-    values_[i] = rename;
+  // Rename the environment values if they were assigned in the loop.
+  for (int i = 0; i < parameter_count(); i++) {
+    if (assignments.ContainsParameter(i)) {
+      Node* rename =
+          graph()->NewNode(common()->LoopExitValue(), values_[i], loop_exit);
+      values_[i] = rename;
+    }
+  }
+  for (int i = 0; i < register_count(); i++) {
+    if (assignments.ContainsLocal(i)) {
+      Node* rename = graph()->NewNode(common()->LoopExitValue(),
+                                      values_[register_base() + i], loop_exit);
+      values_[register_base() + i] = rename;
+    }
+  }
+
+  if (assignments.ContainsAccumulator()) {
+    Node* rename = graph()->NewNode(common()->LoopExitValue(),
+                                    values_[accumulator_base()], loop_exit);
+    values_[accumulator_base()] = rename;
   }
 }
 
 void BytecodeGraphBuilder::Environment::UpdateStateValues(Node** state_values,
-                                                          int offset,
+                                                          Node** values,
                                                           int count) {
-  if (StateValuesRequireUpdate(state_values, offset, count)) {
-    const Operator* op = common()->StateValues(count);
-    (*state_values) = graph()->NewNode(op, count, &values()->at(offset));
+  if (StateValuesRequireUpdate(state_values, values, count)) {
+    const Operator* op = common()->StateValues(count, SparseInputMask::Dense());
+    (*state_values) = graph()->NewNode(op, count, values);
   }
 }
 
+void BytecodeGraphBuilder::Environment::UpdateStateValuesWithCache(
+    Node** state_values, Node** values, int count, const BitVector* liveness,
+    int liveness_offset) {
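+  // The builder-wide cache shares identical StateValues subtrees across
+  // checkpoints and elides dead slots via the liveness bit vector.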
+  *state_values = builder_->state_values_cache_.GetNodeForValues(
+      values, static_cast<size_t>(count), liveness, liveness_offset);
+}
+
 Node* BytecodeGraphBuilder::Environment::Checkpoint(
     BailoutId bailout_id, OutputFrameStateCombine combine,
-    bool owner_has_exception) {
-  UpdateStateValues(&parameters_state_values_, 0, parameter_count());
-  UpdateStateValues(&registers_state_values_, register_base(),
-                    register_count());
-  UpdateStateValues(&accumulator_state_values_, accumulator_base(), 1);
+    bool owner_has_exception, const BytecodeLivenessState* liveness) {
+  if (parameter_count() == register_count()) {
+    // Re-use the state-value cache if the number of local registers happens
+    // to match the parameter count.
+    UpdateStateValuesWithCache(&parameters_state_values_, &values()->at(0),
+                               parameter_count(), nullptr, 0);
+  } else {
+    UpdateStateValues(&parameters_state_values_, &values()->at(0),
+                      parameter_count());
+  }
+
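+  // Registers always go through the cache; slots that the liveness bit
+  // vector marks dead are omitted from the frame state.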
+  UpdateStateValuesWithCache(&registers_state_values_,
+                             &values()->at(register_base()), register_count(),
+                             liveness ? &liveness->bit_vector() : nullptr, 0);
+
+  bool accumulator_is_live = !liveness || liveness->AccumulatorIsLive();
+  if (parameter_count() == 1 && accumulator_is_live &&
+      values()->at(accumulator_base()) == values()->at(0)) {
+    // Re-use the parameter state values if there happens to only be one
+    // parameter and the accumulator is live and holds that parameter's value.
+    accumulator_state_values_ = parameters_state_values_;
+  } else {
+    // Otherwise, use the state values cache to hopefully re-use local register
+    // state values (if there is only one local register), or at the very least
+    // re-use previous accumulator state values.
+    UpdateStateValuesWithCache(
+        &accumulator_state_values_, &values()->at(accumulator_base()), 1,
+        liveness ? &liveness->bit_vector() : nullptr, register_count());
+  }
 
   const Operator* op = common()->FrameState(
       bailout_id, combine, builder()->frame_state_function_info());
@@ -467,51 +458,40 @@
       accumulator_state_values_, Context(), builder()->GetFunctionClosure(),
       builder()->graph()->start());
 
-  if (liveness_block() != nullptr) {
-    // If the owning node has an exception, register the checkpoint to the
-    // predecessor so that the checkpoint is used for both the normal and the
-    // exceptional paths. Yes, this is a terrible hack and we might want
-    // to use an explicit frame state for the exceptional path.
-    if (owner_has_exception) {
-      liveness_block()->GetPredecessor()->Checkpoint(result);
-    } else {
-      liveness_block()->Checkpoint(result);
-    }
-  }
-
   return result;
 }
 
 BytecodeGraphBuilder::BytecodeGraphBuilder(
-    Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
-    float invocation_frequency, SourcePositionTable* source_positions,
-    int inlining_id)
+    Zone* local_zone, Handle<SharedFunctionInfo> shared_info,
+    Handle<FeedbackVector> feedback_vector, BailoutId osr_ast_id,
+    JSGraph* jsgraph, float invocation_frequency,
+    SourcePositionTable* source_positions, int inlining_id)
     : local_zone_(local_zone),
       jsgraph_(jsgraph),
       invocation_frequency_(invocation_frequency),
-      bytecode_array_(handle(info->shared_info()->bytecode_array())),
+      bytecode_array_(handle(shared_info->bytecode_array())),
       exception_handler_table_(
           handle(HandlerTable::cast(bytecode_array()->handler_table()))),
-      feedback_vector_(handle(info->closure()->feedback_vector())),
+      feedback_vector_(feedback_vector),
       frame_state_function_info_(common()->CreateFrameStateFunctionInfo(
           FrameStateType::kInterpretedFunction,
           bytecode_array()->parameter_count(),
-          bytecode_array()->register_count(), info->shared_info())),
-      osr_ast_id_(info->osr_ast_id()),
+          bytecode_array()->register_count(), shared_info)),
+      bytecode_iterator_(nullptr),
+      bytecode_analysis_(nullptr),
+      environment_(nullptr),
+      osr_ast_id_(osr_ast_id),
+      osr_loop_offset_(-1),
       merge_environments_(local_zone),
       exception_handlers_(local_zone),
       current_exception_handler_(0),
       input_buffer_size_(0),
       input_buffer_(nullptr),
+      needs_eager_checkpoint_(true),
       exit_controls_(local_zone),
-      is_liveness_analysis_enabled_(FLAG_analyze_environment_liveness &&
-                                    info->is_deoptimization_enabled()),
       state_values_cache_(jsgraph),
-      liveness_analyzer_(
-          static_cast<size_t>(bytecode_array()->register_count()), true,
-          local_zone),
       source_positions_(source_positions),
-      start_position_(info->shared_info()->start_position(), inlining_id) {}
+      start_position_(shared_info->start_position(), inlining_id) {}
 
 Node* BytecodeGraphBuilder::GetNewTarget() {
   if (!new_target_.is_set()) {
@@ -551,14 +531,16 @@
 Node* BytecodeGraphBuilder::BuildLoadNativeContextField(int index) {
   const Operator* op =
       javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true);
-  Node* native_context = NewNode(op, environment()->Context());
-  return NewNode(javascript()->LoadContext(0, index, true), native_context);
+  Node* native_context = NewNode(op);
+  Node* result = NewNode(javascript()->LoadContext(0, index, true));
+  NodeProperties::ReplaceContextInput(result, native_context);
+  return result;
 }
 
 
 VectorSlotPair BytecodeGraphBuilder::CreateVectorSlotPair(int slot_id) {
-  FeedbackVectorSlot slot;
-  if (slot_id >= TypeFeedbackVector::kReservedIndexCount) {
+  FeedbackSlot slot;
+  if (slot_id >= FeedbackVector::kReservedIndexCount) {
     slot = feedback_vector()->ToSlot(slot_id);
   }
   return VectorSlotPair(feedback_vector(), slot);
@@ -587,24 +569,42 @@
   Node* end = graph()->NewNode(common()->End(input_count), input_count, inputs);
   graph()->SetEnd(end);
 
-  ClearNonLiveSlotsInFrameStates();
-
   return true;
 }
 
 void BytecodeGraphBuilder::PrepareEagerCheckpoint() {
-  if (environment()->GetEffectDependency()->opcode() != IrOpcode::kCheckpoint) {
+  if (needs_eager_checkpoint()) {
     // Create an explicit checkpoint node for before the operation. This only
     // needs to happen if we aren't effect-dominated by a {Checkpoint} already.
+    mark_as_needing_eager_checkpoint(false);
     Node* node = NewNode(common()->Checkpoint());
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     DCHECK_EQ(IrOpcode::kDead,
               NodeProperties::GetFrameStateInput(node)->opcode());
     BailoutId bailout_id(bytecode_iterator().current_offset());
+
+    const BytecodeLivenessState* liveness_before =
+        bytecode_analysis()->GetInLivenessFor(
+            bytecode_iterator().current_offset());
+
     Node* frame_state_before = environment()->Checkpoint(
-        bailout_id, OutputFrameStateCombine::Ignore(), false);
+        bailout_id, OutputFrameStateCombine::Ignore(), false, liveness_before);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_before);
+#ifdef DEBUG
+  } else {
+    // In case we skipped checkpoint creation above, we must be able to find an
+    // existing checkpoint that effect-dominates the nodes about to be created.
+    // Starting a search from the current effect-dependency has to succeed.
+    Node* effect = environment()->GetEffectDependency();
+    while (effect->opcode() != IrOpcode::kCheckpoint) {
+      DCHECK(effect->op()->HasProperty(Operator::kNoWrite));
+      DCHECK_EQ(1, effect->op()->EffectInputCount());
+      effect = NodeProperties::GetEffectInput(effect);
+    }
   }
+#else
+  }
+#endif  // DEBUG
 }
 
 void BytecodeGraphBuilder::PrepareFrameState(Node* node,
@@ -617,40 +617,36 @@
               NodeProperties::GetFrameStateInput(node)->opcode());
     BailoutId bailout_id(bytecode_iterator().current_offset());
     bool has_exception = NodeProperties::IsExceptionalCall(node);
-    Node* frame_state_after =
-        environment()->Checkpoint(bailout_id, combine, has_exception);
+
+    const BytecodeLivenessState* liveness_after =
+        bytecode_analysis()->GetOutLivenessFor(
+            bytecode_iterator().current_offset());
+
+    Node* frame_state_after = environment()->Checkpoint(
+        bailout_id, combine, has_exception, liveness_after);
     NodeProperties::ReplaceFrameStateInput(node, frame_state_after);
   }
 }
 
-void BytecodeGraphBuilder::ClearNonLiveSlotsInFrameStates() {
-  if (!IsLivenessAnalysisEnabled()) {
-    return;
-  }
-  NonLiveFrameStateSlotReplacer replacer(
-      &state_values_cache_, jsgraph()->OptimizedOutConstant(),
-      liveness_analyzer()->local_count(), true, local_zone());
-  liveness_analyzer()->Run(&replacer);
-  if (FLAG_trace_environment_liveness) {
-    OFStream os(stdout);
-    liveness_analyzer()->Print(os);
-  }
-}
-
 void BytecodeGraphBuilder::VisitBytecodes(bool stack_check) {
-  BytecodeBranchAnalysis analysis(bytecode_array(), local_zone());
-  BytecodeLoopAnalysis loop_analysis(bytecode_array(), &analysis, local_zone());
-  analysis.Analyze();
-  loop_analysis.Analyze();
-  set_branch_analysis(&analysis);
-  set_loop_analysis(&loop_analysis);
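+  // Compute loop structure, per-loop assignments and, when enabled,
+  // per-bytecode liveness up front.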
+  BytecodeAnalysis bytecode_analysis(bytecode_array(), local_zone(),
+                                     FLAG_analyze_environment_liveness);
+  bytecode_analysis.Analyze(osr_ast_id_);
+  set_bytecode_analysis(&bytecode_analysis);
 
   interpreter::BytecodeArrayIterator iterator(bytecode_array());
   set_bytecode_iterator(&iterator);
   SourcePositionTableIterator source_position_iterator(
       bytecode_array()->source_position_table());
 
+  if (FLAG_trace_environment_liveness) {
+    OFStream of(stdout);
+    bytecode_analysis.PrintLivenessTo(of);
+  }
+
   BuildOSRNormalEntryPoint();
+
   for (; !iterator.done(); iterator.Advance()) {
     int current_offset = iterator.current_offset();
     UpdateCurrentSourcePosition(&source_position_iterator, current_offset);
@@ -658,7 +654,6 @@
     SwitchToMergeEnvironment(current_offset);
     if (environment() != nullptr) {
       BuildLoopHeaderEnvironment(current_offset);
-      BuildOSRLoopEntryPoint(current_offset);
 
       // Skip the first stack check if stack_check is false
       if (!stack_check &&
@@ -677,8 +672,7 @@
       }
     }
   }
-
-  set_branch_analysis(nullptr);
+  set_bytecode_analysis(nullptr);
   set_bytecode_iterator(nullptr);
   DCHECK(exception_handlers_.empty());
 }
@@ -741,27 +735,32 @@
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(1), value);
 }
 
-Node* BytecodeGraphBuilder::BuildLoadGlobal(uint32_t feedback_slot_index,
+Node* BytecodeGraphBuilder::BuildLoadGlobal(Handle<Name> name,
+                                            uint32_t feedback_slot_index,
                                             TypeofMode typeof_mode) {
   VectorSlotPair feedback = CreateVectorSlotPair(feedback_slot_index);
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            feedback_vector()->GetKind(feedback.slot()));
-  Handle<Name> name(feedback_vector()->GetName(feedback.slot()));
+  DCHECK(IsLoadGlobalICKind(feedback_vector()->GetKind(feedback.slot())));
   const Operator* op = javascript()->LoadGlobal(name, feedback, typeof_mode);
-  return NewNode(op, GetFunctionClosure());
+  return NewNode(op);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobal() {
   PrepareEagerCheckpoint();
-  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
-                               TypeofMode::NOT_INSIDE_TYPEOF);
+  Handle<Name> name =
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+  Node* node =
+      BuildLoadGlobal(name, feedback_slot_index, TypeofMode::NOT_INSIDE_TYPEOF);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLdaGlobalInsideTypeof() {
   PrepareEagerCheckpoint();
-  Node* node = BuildLoadGlobal(bytecode_iterator().GetIndexOperand(0),
-                               TypeofMode::INSIDE_TYPEOF);
+  Handle<Name> name =
+      Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+  uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+  Node* node =
+      BuildLoadGlobal(name, feedback_slot_index, TypeofMode::INSIDE_TYPEOF);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -774,7 +773,7 @@
   Node* value = environment()->LookupAccumulator();
 
   const Operator* op = javascript()->StoreGlobal(language_mode, name, feedback);
-  Node* node = NewNode(op, value, GetFunctionClosure());
+  Node* node = NewNode(op, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
@@ -786,27 +785,56 @@
   BuildStoreGlobal(LanguageMode::STRICT);
 }
 
+void BytecodeGraphBuilder::VisitStaDataPropertyInLiteral() {
+  PrepareEagerCheckpoint();
+
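+  // Operands: object register, name register, flags, feedback slot index.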
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* name =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
+  Node* value = environment()->LookupAccumulator();
+  int flags = bytecode_iterator().GetFlagOperand(2);
+  VectorSlotPair feedback =
+      CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(3));
+
+  const Operator* op = javascript()->StoreDataPropertyInLiteral(feedback);
+  Node* node = NewNode(op, object, name, value, jsgraph()->Constant(flags));
+  environment()->RecordAfterState(node, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitLdaContextSlot() {
-  // TODO(mythria): immutable flag is also set to false. This information is not
-  // available in bytecode array. update this code when the implementation
-  // changes.
   const Operator* op = javascript()->LoadContext(
       bytecode_iterator().GetUnsignedImmediateOperand(2),
       bytecode_iterator().GetIndexOperand(1), false);
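+  // NewNode attaches the current context by default; the context loaded from
+  // the register operand is patched in below.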
+  Node* node = NewNode(op);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Node* node = NewNode(op, context);
+  NodeProperties::ReplaceContextInput(node, context);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitLdaImmutableContextSlot() {
+  const Operator* op = javascript()->LoadContext(
+      bytecode_iterator().GetUnsignedImmediateOperand(2),
+      bytecode_iterator().GetIndexOperand(1), true);
+  Node* node = NewNode(op);
+  Node* context =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  NodeProperties::ReplaceContextInput(node, context);
   environment()->BindAccumulator(node);
 }
 
 void BytecodeGraphBuilder::VisitLdaCurrentContextSlot() {
-  // TODO(mythria): immutable flag is also set to false. This information is not
-  // available in bytecode array. update this code when the implementation
-  // changes.
   const Operator* op = javascript()->LoadContext(
       0, bytecode_iterator().GetIndexOperand(0), false);
-  Node* context = environment()->Context();
-  Node* node = NewNode(op, context);
+  Node* node = NewNode(op);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitLdaImmutableCurrentContextSlot() {
+  const Operator* op = javascript()->LoadContext(
+      0, bytecode_iterator().GetIndexOperand(0), true);
+  Node* node = NewNode(op);
   environment()->BindAccumulator(node);
 }
 
@@ -814,18 +842,18 @@
   const Operator* op = javascript()->StoreContext(
       bytecode_iterator().GetUnsignedImmediateOperand(2),
       bytecode_iterator().GetIndexOperand(1));
+  Node* value = environment()->LookupAccumulator();
+  Node* node = NewNode(op, value);
   Node* context =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  Node* value = environment()->LookupAccumulator();
-  NewNode(op, context, value);
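+  // Patch the node to use the context from the register operand.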
+  NodeProperties::ReplaceContextInput(node, context);
 }
 
 void BytecodeGraphBuilder::VisitStaCurrentContextSlot() {
   const Operator* op =
       javascript()->StoreContext(0, bytecode_iterator().GetIndexOperand(0));
-  Node* context = environment()->Context();
   Node* value = environment()->LookupAccumulator();
-  NewNode(op, context, value);
+  NewNode(op, value);
 }
 
 void BytecodeGraphBuilder::BuildLdaLookupSlot(TypeofMode typeof_mode) {
@@ -857,15 +885,14 @@
   // the same scope as the variable itself has no way of shadowing it.
   for (uint32_t d = 0; d < depth; d++) {
     Node* extension_slot =
-        NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false),
-                environment()->Context());
+        NewNode(javascript()->LoadContext(d, Context::EXTENSION_INDEX, false));
 
     Node* check_no_extension =
-        NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-                extension_slot, jsgraph()->TheHoleConstant());
+        NewNode(simplified()->ReferenceEqual(), extension_slot,
+                jsgraph()->TheHoleConstant());
 
     NewBranch(check_no_extension);
-    Environment* true_environment = environment()->CopyForConditional();
+    Environment* true_environment = environment()->Copy();
 
     {
       NewIfFalse();
@@ -904,8 +931,7 @@
     uint32_t slot_index = bytecode_iterator().GetIndexOperand(1);
 
     const Operator* op = javascript()->LoadContext(depth, slot_index, false);
-    Node* context = environment()->Context();
-    environment()->BindAccumulator(NewNode(op, context));
+    environment()->BindAccumulator(NewNode(op));
   }
 
   // Only build the slow path if there were any slow-path checks.
@@ -930,6 +956,7 @@
 
     fast_environment->Merge(environment());
     set_environment(fast_environment);
+    mark_as_needing_eager_checkpoint(true);
   }
 }
 
@@ -950,8 +977,10 @@
   // Fast path, do a global load.
   {
     PrepareEagerCheckpoint();
-    Node* node =
-        BuildLoadGlobal(bytecode_iterator().GetIndexOperand(1), typeof_mode);
+    Handle<Name> name =
+        Handle<Name>::cast(bytecode_iterator().GetConstantForIndexOperand(0));
+    uint32_t feedback_slot_index = bytecode_iterator().GetIndexOperand(1);
+    Node* node = BuildLoadGlobal(name, feedback_slot_index, typeof_mode);
     environment()->BindAccumulator(node, Environment::kAttachFrameState);
   }
 
@@ -977,6 +1006,7 @@
 
     fast_environment->Merge(environment());
     set_environment(fast_environment);
+    mark_as_needing_eager_checkpoint(true);
   }
 }
 
@@ -1018,7 +1048,7 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
   const Operator* op = javascript()->LoadNamed(name, feedback);
-  Node* node = NewNode(op, object, GetFunctionClosure());
+  Node* node = NewNode(op, object);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1031,11 +1061,12 @@
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(1));
 
   const Operator* op = javascript()->LoadProperty(feedback);
-  Node* node = NewNode(op, object, key, GetFunctionClosure());
+  Node* node = NewNode(op, object, key);
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode) {
+void BytecodeGraphBuilder::BuildNamedStore(LanguageMode language_mode,
+                                           StoreMode store_mode) {
   PrepareEagerCheckpoint();
   Node* value = environment()->LookupAccumulator();
   Node* object =
@@ -1045,17 +1076,31 @@
   VectorSlotPair feedback =
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
-  const Operator* op = javascript()->StoreNamed(language_mode, name, feedback);
-  Node* node = NewNode(op, object, value, GetFunctionClosure());
+  const Operator* op;
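+  // Own-property stores use the dedicated StoreNamedOwn operator; normal
+  // stores keep the language-mode-dependent StoreNamed.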
+  if (store_mode == StoreMode::kOwn) {
+    DCHECK_EQ(FeedbackSlotKind::kStoreOwnNamed,
+              feedback.vector()->GetKind(feedback.slot()));
+    op = javascript()->StoreNamedOwn(name, feedback);
+  } else {
+    DCHECK(store_mode == StoreMode::kNormal);
+    DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()),
+              language_mode);
+    op = javascript()->StoreNamed(language_mode, name, feedback);
+  }
+  Node* node = NewNode(op, object, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitStaNamedPropertySloppy() {
-  BuildNamedStore(LanguageMode::SLOPPY);
+  BuildNamedStore(LanguageMode::SLOPPY, StoreMode::kNormal);
 }
 
 void BytecodeGraphBuilder::VisitStaNamedPropertyStrict() {
-  BuildNamedStore(LanguageMode::STRICT);
+  BuildNamedStore(LanguageMode::STRICT, StoreMode::kNormal);
+}
+
+void BytecodeGraphBuilder::VisitStaNamedOwnProperty() {
+  BuildNamedStore(LanguageMode::STRICT, StoreMode::kOwn);
 }
 
 void BytecodeGraphBuilder::BuildKeyedStore(LanguageMode language_mode) {
@@ -1068,8 +1113,9 @@
   VectorSlotPair feedback =
       CreateVectorSlotPair(bytecode_iterator().GetIndexOperand(2));
 
+  DCHECK_EQ(feedback.vector()->GetLanguageMode(feedback.slot()), language_mode);
   const Operator* op = javascript()->StoreProperty(language_mode, feedback);
-  Node* node = NewNode(op, object, key, value, GetFunctionClosure());
+  Node* node = NewNode(op, object, key, value);
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
@@ -1085,8 +1131,7 @@
   int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
   uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
   Node* module =
-      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-              environment()->Context());
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
   Node* value = NewNode(javascript()->LoadModule(cell_index), module);
   environment()->BindAccumulator(value);
 }
@@ -1095,8 +1140,7 @@
   int32_t cell_index = bytecode_iterator().GetImmediateOperand(0);
   uint32_t depth = bytecode_iterator().GetUnsignedImmediateOperand(1);
   Node* module =
-      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, false),
-              environment()->Context());
+      NewNode(javascript()->LoadContext(depth, Context::EXTENSION_INDEX, true));
   Node* value = environment()->LookupAccumulator();
   NewNode(javascript()->StoreModule(cell_index), module, value);
 }
@@ -1117,12 +1161,14 @@
 void BytecodeGraphBuilder::VisitCreateClosure() {
   Handle<SharedFunctionInfo> shared_info = Handle<SharedFunctionInfo>::cast(
       bytecode_iterator().GetConstantForIndexOperand(0));
+  int const slot_id = bytecode_iterator().GetIndexOperand(1);
+  VectorSlotPair pair = CreateVectorSlotPair(slot_id);
   PretenureFlag tenured =
       interpreter::CreateClosureFlags::PretenuredBit::decode(
-          bytecode_iterator().GetFlagOperand(1))
+          bytecode_iterator().GetFlagOperand(2))
           ? TENURED
           : NOT_TENURED;
-  const Operator* op = javascript()->CreateClosure(shared_info, tenured);
+  const Operator* op = javascript()->CreateClosure(shared_info, pair, tenured);
   Node* closure = NewNode(op);
   environment()->BindAccumulator(closure);
 }
@@ -1138,7 +1184,15 @@
 
 void BytecodeGraphBuilder::VisitCreateFunctionContext() {
   uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
-  const Operator* op = javascript()->CreateFunctionContext(slots);
+  const Operator* op =
+      javascript()->CreateFunctionContext(slots, FUNCTION_SCOPE);
+  Node* context = NewNode(op, GetFunctionClosure());
+  environment()->BindAccumulator(context);
+}
+
+void BytecodeGraphBuilder::VisitCreateEvalContext() {
+  uint32_t slots = bytecode_iterator().GetUnsignedImmediateOperand(0);
+  const Operator* op = javascript()->CreateFunctionContext(slots, EVAL_SCOPE);
   Node* context = NewNode(op, GetFunctionClosure());
   environment()->BindAccumulator(context);
 }
@@ -1198,16 +1252,21 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateArrayLiteral() {
-  Handle<FixedArray> constant_elements = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
+  Handle<ConstantElementsPair> constant_elements =
+      Handle<ConstantElementsPair>::cast(
+          bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
-  int literal_flags = bytecode_iterator().GetFlagOperand(2);
+  int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
+  int literal_flags =
+      interpreter::CreateArrayLiteralFlags::FlagsBits::decode(bytecode_flags);
   // Disable allocation site mementos. Only unoptimized code will collect
   // feedback about allocation sites. Once the code is optimized we expect the
   // data to converge. So, we disable allocation site mementos in optimized
   // code. We can revisit this when we have data to the contrary.
   literal_flags |= ArrayLiteral::kDisableMementos;
-  int number_of_elements = constant_elements->length();
+  // TODO(mstarzinger): Thread through number of elements. The below number is
+  // only an estimate and does not match {ArrayLiteral::values::length}.
+  int number_of_elements = constant_elements->constant_values()->length();
   Node* literal = NewNode(
       javascript()->CreateLiteralArray(constant_elements, literal_flags,
                                        literal_index, number_of_elements),
@@ -1216,15 +1275,16 @@
 }
 
 void BytecodeGraphBuilder::VisitCreateObjectLiteral() {
-  PrepareEagerCheckpoint();
-  Handle<FixedArray> constant_properties = Handle<FixedArray>::cast(
-      bytecode_iterator().GetConstantForIndexOperand(0));
+  Handle<BoilerplateDescription> constant_properties =
+      Handle<BoilerplateDescription>::cast(
+          bytecode_iterator().GetConstantForIndexOperand(0));
   int literal_index = bytecode_iterator().GetIndexOperand(1);
   int bytecode_flags = bytecode_iterator().GetFlagOperand(2);
   int literal_flags =
       interpreter::CreateObjectLiteralFlags::FlagsBits::decode(bytecode_flags);
-  // TODO(mstarzinger): Thread through number of properties.
-  int number_of_properties = constant_properties->length() / 2;
+  // TODO(mstarzinger): Thread through number of properties. The below number is
+  // only an estimate and does not match {ObjectLiteral::properties_count}.
+  int number_of_properties = constant_properties->size();
   Node* literal = NewNode(
       javascript()->CreateLiteralObject(constant_properties, literal_flags,
                                         literal_index, number_of_properties),
@@ -1260,13 +1320,13 @@
 
   // Slot index of 0 is used to indicate no feedback slot is available. Assert
   // the assumption that slot index 0 is never a valid feedback slot.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
   VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
   float const frequency = ComputeCallFrequency(slot_id);
-  const Operator* call = javascript()->CallFunction(
-      arg_count + 1, frequency, feedback, receiver_hint, tail_call_mode);
+  const Operator* call = javascript()->Call(arg_count + 1, frequency, feedback,
+                                            receiver_hint, tail_call_mode);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
@@ -1275,6 +1335,19 @@
   BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kAny);
 }
 
+void BytecodeGraphBuilder::VisitCallWithSpread() {
+  PrepareEagerCheckpoint();
+  Node* callee =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  interpreter::Register receiver = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+  const Operator* call =
+      javascript()->CallWithSpread(static_cast<int>(arg_count + 1));
+
+  Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitCallProperty() {
   BuildCall(TailCallMode::kDisallow, ConvertReceiverMode::kNotNullOrUndefined);
 }
@@ -1295,7 +1368,7 @@
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
 
   // Create node to perform the JS runtime call.
-  const Operator* call = javascript()->CallFunction(arg_count + 1);
+  const Operator* call = javascript()->Call(arg_count + 1);
   Node* value = ProcessCallArguments(call, callee, receiver, arg_count + 1);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
@@ -1340,6 +1413,37 @@
                                             Environment::kAttachFrameState);
 }
 
+Node* BytecodeGraphBuilder::ProcessConstructWithSpreadArguments(
+    const Operator* op, Node* callee, Node* new_target,
+    interpreter::Register first_arg, size_t arity) {
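+  // Input layout expected by ConstructWithSpread: callee first, then the
+  // arguments, then new.target last.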
+  Node** all = local_zone()->NewArray<Node*>(arity);
+  all[0] = callee;
+  int first_arg_index = first_arg.index();
+  for (int i = 1; i < static_cast<int>(arity) - 1; ++i) {
+    all[i] = environment()->LookupRegister(
+        interpreter::Register(first_arg_index + i - 1));
+  }
+  all[arity - 1] = new_target;
+  Node* value = MakeNode(op, static_cast<int>(arity), all, false);
+  return value;
+}
+
+void BytecodeGraphBuilder::VisitConstructWithSpread() {
+  PrepareEagerCheckpoint();
+  interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
+  interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
+  size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
+
+  Node* new_target = environment()->LookupAccumulator();
+  Node* callee = environment()->LookupRegister(callee_reg);
+
+  const Operator* op =
+      javascript()->ConstructWithSpread(static_cast<int>(arg_count) + 2);
+  Node* value = ProcessConstructWithSpreadArguments(op, callee, new_target,
+                                                    first_arg, arg_count + 2);
+  environment()->BindAccumulator(value, Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::VisitInvokeIntrinsic() {
   PrepareEagerCheckpoint();
   Runtime::FunctionId functionId = bytecode_iterator().GetIntrinsicIdOperand(0);
@@ -1353,7 +1457,7 @@
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
-Node* BytecodeGraphBuilder::ProcessCallNewArguments(
+Node* BytecodeGraphBuilder::ProcessConstructArguments(
     const Operator* call_new_op, Node* callee, Node* new_target,
     interpreter::Register first_arg, size_t arity) {
   Node** all = local_zone()->NewArray<Node*>(arity);
@@ -1368,14 +1472,14 @@
   return value;
 }
 
-void BytecodeGraphBuilder::VisitNew() {
+void BytecodeGraphBuilder::VisitConstruct() {
   PrepareEagerCheckpoint();
   interpreter::Register callee_reg = bytecode_iterator().GetRegisterOperand(0);
   interpreter::Register first_arg = bytecode_iterator().GetRegisterOperand(1);
   size_t arg_count = bytecode_iterator().GetRegisterCountOperand(2);
   // Slot index of 0 is used to indicate no feedback slot is available. Assert
   // the assumption that slot index 0 is never a valid feedback slot.
-  STATIC_ASSERT(TypeFeedbackVector::kReservedIndexCount > 0);
+  STATIC_ASSERT(FeedbackVector::kReservedIndexCount > 0);
   int const slot_id = bytecode_iterator().GetIndexOperand(3);
   VectorSlotPair feedback = CreateVectorSlotPair(slot_id);
 
@@ -1383,24 +1487,18 @@
   Node* callee = environment()->LookupRegister(callee_reg);
 
   float const frequency = ComputeCallFrequency(slot_id);
-  const Operator* call = javascript()->CallConstruct(
+  const Operator* call = javascript()->Construct(
       static_cast<int>(arg_count) + 2, frequency, feedback);
-  Node* value = ProcessCallNewArguments(call, callee, new_target, first_arg,
-                                        arg_count + 2);
+  Node* value = ProcessConstructArguments(call, callee, new_target, first_arg,
+                                          arg_count + 2);
   environment()->BindAccumulator(value, Environment::kAttachFrameState);
 }
 
-void BytecodeGraphBuilder::BuildThrow() {
-  PrepareEagerCheckpoint();
-  Node* value = environment()->LookupAccumulator();
-  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
-  environment()->BindAccumulator(call, Environment::kAttachFrameState);
-}
-
 void BytecodeGraphBuilder::VisitThrow() {
   BuildLoopExitsForFunctionExit();
-  BuildThrow();
-  Node* call = environment()->LookupAccumulator();
+  Node* value = environment()->LookupAccumulator();
+  Node* call = NewNode(javascript()->CallRuntime(Runtime::kThrow), value);
+  environment()->BindAccumulator(call, Environment::kAttachFrameState);
   Node* control = NewNode(common()->Throw(), call);
   MergeControlToLeaveFunction(control);
 }
@@ -1413,12 +1511,39 @@
   MergeControlToLeaveFunction(control);
 }
 
-void BytecodeGraphBuilder::BuildBinaryOp(const Operator* js_op) {
+Node* BytecodeGraphBuilder::TryBuildSimplifiedBinaryOp(const Operator* op,
+                                                       Node* left, Node* right,
+                                                       FeedbackSlot slot) {
+  Node* effect = environment()->GetEffectDependency();
+  Node* control = environment()->GetControlDependency();
+  JSTypeHintLowering type_hint_lowering(jsgraph(), feedback_vector());
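+  // Consult the recorded type feedback and, if possible, lower the generic
+  // JS operator to a speculative simplified operator right away.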
+  Reduction early_reduction = type_hint_lowering.ReduceBinaryOperation(
+      op, left, right, effect, control, slot);
+  if (early_reduction.Changed()) {
+    Node* node = early_reduction.replacement();
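+    // The lowered node may produce a fresh effect; keep the environment's
+    // effect chain in sync.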
+    if (node->op()->EffectOutputCount() > 0) {
+      environment()->UpdateEffectDependency(node);
+    }
+    return node;
+  }
+  return nullptr;
+}
+
+void BytecodeGraphBuilder::BuildBinaryOp(const Operator* op) {
   PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
   Node* right = environment()->LookupAccumulator();
-  Node* node = NewNode(js_op, left, right);
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kBinaryOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1426,10 +1551,9 @@
 // feedback.
 BinaryOperationHint BytecodeGraphBuilder::GetBinaryOperationHint(
     int operand_index) {
-  FeedbackVectorSlot slot = feedback_vector()->ToSlot(
+  FeedbackSlot slot = feedback_vector()->ToSlot(
       bytecode_iterator().GetIndexOperand(operand_index));
-  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_BINARYOP_IC,
-            feedback_vector()->GetKind(slot));
+  DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot));
   BinaryOpICNexus nexus(feedback_vector(), slot);
   return nexus.GetBinaryOperationFeedback();
 }
@@ -1441,10 +1565,9 @@
   if (slot_index == 0) {
     return CompareOperationHint::kAny;
   }
-  FeedbackVectorSlot slot =
+  FeedbackSlot slot =
       feedback_vector()->ToSlot(bytecode_iterator().GetIndexOperand(1));
-  DCHECK_EQ(FeedbackVectorSlotKind::INTERPRETER_COMPARE_IC,
-            feedback_vector()->GetKind(slot));
+  DCHECK_EQ(FeedbackSlotKind::kCompareOp, feedback_vector()->GetKind(slot));
   CompareICNexus nexus(feedback_vector(), slot);
   return nexus.GetCompareOperationFeedback();
 }
@@ -1460,61 +1583,58 @@
 }
 
 void BytecodeGraphBuilder::VisitSub() {
-  BuildBinaryOp(javascript()->Subtract(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Subtract());
 }
 
 void BytecodeGraphBuilder::VisitMul() {
-  BuildBinaryOp(javascript()->Multiply(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Multiply());
 }
 
-void BytecodeGraphBuilder::VisitDiv() {
-  BuildBinaryOp(
-      javascript()->Divide(GetBinaryOperationHint(kBinaryOperationHintIndex)));
-}
+void BytecodeGraphBuilder::VisitDiv() { BuildBinaryOp(javascript()->Divide()); }
 
 void BytecodeGraphBuilder::VisitMod() {
-  BuildBinaryOp(
-      javascript()->Modulus(GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->Modulus());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseOr() {
-  BuildBinaryOp(javascript()->BitwiseOr(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseOr());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseXor() {
-  BuildBinaryOp(javascript()->BitwiseXor(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseXor());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseAnd() {
-  BuildBinaryOp(javascript()->BitwiseAnd(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->BitwiseAnd());
 }
 
 void BytecodeGraphBuilder::VisitShiftLeft() {
-  BuildBinaryOp(javascript()->ShiftLeft(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftLeft());
 }
 
 void BytecodeGraphBuilder::VisitShiftRight() {
-  BuildBinaryOp(javascript()->ShiftRight(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftRight());
 }
 
 void BytecodeGraphBuilder::VisitShiftRightLogical() {
-  BuildBinaryOp(javascript()->ShiftRightLogical(
-      GetBinaryOperationHint(kBinaryOperationHintIndex)));
+  BuildBinaryOp(javascript()->ShiftRightLogical());
 }
 
-void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* js_op) {
+void BytecodeGraphBuilder::BuildBinaryOpWithImmediate(const Operator* op) {
   PrepareEagerCheckpoint();
   Node* left =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(1));
   Node* right = jsgraph()->Constant(bytecode_iterator().GetImmediateOperand(0));
-  Node* node = NewNode(js_op, left, right);
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kBinaryOperationSmiHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
@@ -1524,62 +1644,73 @@
 }
 
 void BytecodeGraphBuilder::VisitSubSmi() {
-  BuildBinaryOpWithImmediate(javascript()->Subtract(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->Subtract());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseOrSmi() {
-  BuildBinaryOpWithImmediate(javascript()->BitwiseOr(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->BitwiseOr());
 }
 
 void BytecodeGraphBuilder::VisitBitwiseAndSmi() {
-  BuildBinaryOpWithImmediate(javascript()->BitwiseAnd(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->BitwiseAnd());
 }
 
 void BytecodeGraphBuilder::VisitShiftLeftSmi() {
-  BuildBinaryOpWithImmediate(javascript()->ShiftLeft(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->ShiftLeft());
 }
 
 void BytecodeGraphBuilder::VisitShiftRightSmi() {
-  BuildBinaryOpWithImmediate(javascript()->ShiftRight(
-      GetBinaryOperationHint(kBinaryOperationSmiHintIndex)));
+  BuildBinaryOpWithImmediate(javascript()->ShiftRight());
 }
 
 void BytecodeGraphBuilder::VisitInc() {
   PrepareEagerCheckpoint();
   // Note: We use "subtract -1" here rather than "add 1" to ensure we always
   // convert to a number, not a string.
-  const Operator* js_op =
-      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
-  Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->Constant(-1));
+  Node* left = environment()->LookupAccumulator();
+  Node* right = jsgraph()->Constant(-1);
+  const Operator* op = javascript()->Subtract();
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitDec() {
   PrepareEagerCheckpoint();
-  const Operator* js_op =
-      javascript()->Subtract(GetBinaryOperationHint(kCountOperationHintIndex));
-  Node* node = NewNode(js_op, environment()->LookupAccumulator(),
-                       jsgraph()->OneConstant());
+  Node* left = environment()->LookupAccumulator();
+  Node* right = jsgraph()->OneConstant();
+  const Operator* op = javascript()->Subtract();
+
+  Node* node = nullptr;
+  FeedbackSlot slot = feedback_vector()->ToSlot(
+      bytecode_iterator().GetIndexOperand(kCountOperationHintIndex));
+  if (Node* simplified = TryBuildSimplifiedBinaryOp(op, left, right, slot)) {
+    node = simplified;
+  } else {
+    node = NewNode(op, left, right);
+  }
+
   environment()->BindAccumulator(node, Environment::kAttachFrameState);
 }
 
 void BytecodeGraphBuilder::VisitLogicalNot() {
   Node* value = environment()->LookupAccumulator();
-  Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
-                       jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  Node* node = NewNode(simplified()->BooleanNot(), value);
   environment()->BindAccumulator(node);
 }
 
 void BytecodeGraphBuilder::VisitToBooleanLogicalNot() {
   Node* value = NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
                         environment()->LookupAccumulator());
-  Node* node = NewNode(common()->Select(MachineRepresentation::kTagged), value,
-                       jsgraph()->FalseConstant(), jsgraph()->TrueConstant());
+  Node* node = NewNode(simplified()->BooleanNot(), value);
   environment()->BindAccumulator(node);
 }
 
@@ -1607,6 +1738,13 @@
   BuildDelete(LanguageMode::SLOPPY);
 }
 
+void BytecodeGraphBuilder::VisitGetSuperConstructor() {
+  Node* node = NewNode(javascript()->GetSuperConstructor(),
+                       environment()->LookupAccumulator());
+  environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), node,
+                              Environment::kAttachFrameState);
+}
+
 void BytecodeGraphBuilder::BuildCompareOp(const Operator* js_op) {
   PrepareEagerCheckpoint();
   Node* left =
@@ -1652,8 +1790,30 @@
   BuildCompareOp(javascript()->InstanceOf());
 }
 
+void BytecodeGraphBuilder::VisitTestUndetectable() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* node = NewNode(jsgraph()->simplified()->ObjectIsUndetectable(), object);
+  environment()->BindAccumulator(node);
+}
+
+void BytecodeGraphBuilder::VisitTestNull() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* result = NewNode(simplified()->ReferenceEqual(), object,
+                         jsgraph()->NullConstant());
+  environment()->BindAccumulator(result);
+}
+
+void BytecodeGraphBuilder::VisitTestUndefined() {
+  Node* object =
+      environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
+  Node* result = NewNode(simplified()->ReferenceEqual(), object,
+                         jsgraph()->UndefinedConstant());
+  environment()->BindAccumulator(result);
+}
+
 void BytecodeGraphBuilder::BuildCastOperator(const Operator* js_op) {
-  PrepareEagerCheckpoint();
   Node* value = NewNode(js_op, environment()->LookupAccumulator());
   environment()->BindRegister(bytecode_iterator().GetRegisterOperand(0), value,
                               Environment::kAttachFrameState);
@@ -1705,6 +1865,12 @@
   BuildJumpIfNotHole();
 }
 
+void BytecodeGraphBuilder::VisitJumpIfJSReceiver() { BuildJumpIfJSReceiver(); }
+
+void BytecodeGraphBuilder::VisitJumpIfJSReceiverConstant() {
+  BuildJumpIfJSReceiver();
+}
+
 void BytecodeGraphBuilder::VisitJumpIfNull() {
   BuildJumpIfEqual(jsgraph()->NullConstant());
 }
@@ -1729,6 +1895,12 @@
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
+void BytecodeGraphBuilder::VisitSetPendingMessage() {
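+  // Exchange the pending message with the accumulator: the previous message
+  // ends up in the accumulator, the old accumulator becomes the new message.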
+  Node* previous_message = NewNode(javascript()->LoadMessage());
+  NewNode(javascript()->StoreMessage(), environment()->LookupAccumulator());
+  environment()->BindAccumulator(previous_message);
+}
+
 void BytecodeGraphBuilder::VisitReturn() {
   BuildLoopExitsForFunctionExit();
   Node* pop_node = jsgraph()->ZeroConstant();
@@ -1739,10 +1911,8 @@
 
 void BytecodeGraphBuilder::VisitDebugger() {
   PrepareEagerCheckpoint();
-  Node* call =
-      NewNode(javascript()->CallRuntime(Runtime::kHandleDebuggerStatement));
-  environment()->BindAccumulator(call, Environment::kAttachFrameState);
-  environment()->MarkAllRegistersLive();
+  Node* call = NewNode(javascript()->Debugger());
+  environment()->RecordAfterState(call, Environment::kAttachFrameState);
 }
 
 // We cannot create a graph from the debugger copy of the bytecode array.
@@ -1798,8 +1968,9 @@
   PrepareEagerCheckpoint();
   Node* index =
       environment()->LookupRegister(bytecode_iterator().GetRegisterOperand(0));
-  index = NewNode(javascript()->Add(BinaryOperationHint::kSignedSmall), index,
-                  jsgraph()->OneConstant());
+  index = NewNode(
+      simplified()->SpeculativeNumberAdd(NumberOperationHint::kSignedSmall),
+      index, jsgraph()->OneConstant());
   environment()->BindAccumulator(index, Environment::kAttachFrameState);
 }
 
@@ -1866,33 +2037,45 @@
 void BytecodeGraphBuilder::VisitNop() {}
 
 void BytecodeGraphBuilder::SwitchToMergeEnvironment(int current_offset) {
-  if (merge_environments_[current_offset] != nullptr) {
+  auto it = merge_environments_.find(current_offset);
+  if (it != merge_environments_.end()) {
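+    // After a control-flow merge the effect dependency no longer points at a
+    // usable Checkpoint, so request a fresh eager checkpoint.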
+    mark_as_needing_eager_checkpoint(true);
     if (environment() != nullptr) {
-      merge_environments_[current_offset]->Merge(environment());
+      it->second->Merge(environment());
     }
-    set_environment(merge_environments_[current_offset]);
+    set_environment(it->second);
   }
 }
 
 void BytecodeGraphBuilder::BuildLoopHeaderEnvironment(int current_offset) {
-  if (branch_analysis()->backward_branches_target(current_offset)) {
-    // Add loop header and store a copy so we can connect merged back
-    // edge inputs to the loop header.
-    merge_environments_[current_offset] = environment()->CopyForLoop();
+  if (bytecode_analysis()->IsLoopHeader(current_offset)) {
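+    // Loop headers introduce effect phis, so any earlier Checkpoint no
+    // longer effect-dominates the loop body.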
+    mark_as_needing_eager_checkpoint(true);
+    const LoopInfo& loop_info =
+        bytecode_analysis()->GetLoopInfoFor(current_offset);
+
+    // Add loop header.
+    environment()->PrepareForLoop(loop_info.assignments());
+
+    BuildOSRLoopEntryPoint(current_offset);
+
+    // Store a copy of the environment so we can connect merged back edge inputs
+    // to the loop header.
+    merge_environments_[current_offset] = environment()->Copy();
   }
 }
 
 void BytecodeGraphBuilder::MergeIntoSuccessorEnvironment(int target_offset) {
   BuildLoopExitsForBranch(target_offset);
-  if (merge_environments_[target_offset] == nullptr) {
+  Environment*& merge_environment = merge_environments_[target_offset];
+  if (merge_environment == nullptr) {
     // Append merge nodes to the environment. We may merge here with another
     // environment. So add a placeholder for merge nodes. We may add redundant
     // merge nodes, but they will be eliminated in a later pass.
     // TODO(mstarzinger): Be smarter about this!
     NewMerge();
-    merge_environments_[target_offset] = environment();
+    merge_environment = environment();
   } else {
-    merge_environments_[target_offset]->Merge(environment());
+    merge_environment->Merge(environment());
   }
   set_environment(nullptr);
 }
@@ -1903,13 +2086,14 @@
 }
 
 void BytecodeGraphBuilder::BuildOSRLoopEntryPoint(int current_offset) {
-  if (!osr_ast_id_.IsNone() && osr_ast_id_.ToInt() == current_offset) {
+  DCHECK(bytecode_analysis()->IsLoopHeader(current_offset));
+
+  if (!osr_ast_id_.IsNone() && osr_loop_offset_ == current_offset) {
     // For OSR add a special {OsrLoopEntry} node into the current loop header.
     // It will be turned into a usable entry by the OSR deconstruction.
-    Environment* loop_env = merge_environments_[current_offset];
-    Environment* osr_env = loop_env->CopyForOsrEntry();
+    Environment* osr_env = environment()->Copy();
     osr_env->PrepareForOsrEntry();
-    loop_env->Merge(osr_env);
+    environment()->Merge(osr_env);
   }
 }
 
@@ -1918,9 +2102,11 @@
     // For OSR add an {OsrNormalEntry} as the top-level environment start.
     // It will be replaced with {Dead} by the OSR deconstruction.
     NewNode(common()->OsrNormalEntry());
-    // Note that the requested OSR entry point must be the target of a backward
-    // branch, otherwise there will not be a proper loop header available.
-    DCHECK(branch_analysis()->backward_branches_target(osr_ast_id_.ToInt()));
+    // Translate the offset of the jump instruction to the jump target offset of
+    // that instruction so that the derived BailoutId points to the loop header.
+    osr_loop_offset_ =
+        bytecode_analysis()->GetLoopOffsetFor(osr_ast_id_.ToInt());
+    DCHECK(bytecode_analysis()->IsLoopHeader(osr_loop_offset_));
   }
 }
 
@@ -1928,17 +2114,20 @@
   int origin_offset = bytecode_iterator().current_offset();
   // Only build loop exits for forward edges.
   if (target_offset > origin_offset) {
-    BuildLoopExitsUntilLoop(loop_analysis()->GetLoopOffsetFor(target_offset));
+    BuildLoopExitsUntilLoop(
+        bytecode_analysis()->GetLoopOffsetFor(target_offset));
   }
 }
 
 void BytecodeGraphBuilder::BuildLoopExitsUntilLoop(int loop_offset) {
   int origin_offset = bytecode_iterator().current_offset();
-  int current_loop = loop_analysis()->GetLoopOffsetFor(origin_offset);
+  int current_loop = bytecode_analysis()->GetLoopOffsetFor(origin_offset);
   while (loop_offset < current_loop) {
     Node* loop_node = merge_environments_[current_loop]->GetControlDependency();
-    environment()->PrepareForLoopExit(loop_node);
-    current_loop = loop_analysis()->GetParentLoopFor(current_loop);
+    const LoopInfo& loop_info =
+        bytecode_analysis()->GetLoopInfoFor(current_loop);
+    environment()->PrepareForLoopExit(loop_node, loop_info.assignments());
+    current_loop = loop_info.parent_offset();
   }
 }
 
@@ -1952,7 +2141,7 @@
 
 void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
   NewBranch(condition);
-  Environment* if_false_environment = environment()->CopyForConditional();
+  Environment* if_false_environment = environment()->Copy();
   NewIfTrue();
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_false_environment);
@@ -1961,7 +2150,7 @@
 
 void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
   NewBranch(condition);
-  Environment* if_true_environment = environment()->CopyForConditional();
+  Environment* if_true_environment = environment()->Copy();
   NewIfFalse();
   MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
   set_environment(if_true_environment);
@@ -1971,17 +2160,30 @@
 void BytecodeGraphBuilder::BuildJumpIfEqual(Node* comperand) {
   Node* accumulator = environment()->LookupAccumulator();
   Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-              accumulator, comperand);
+      NewNode(simplified()->ReferenceEqual(), accumulator, comperand);
   BuildJumpIf(condition);
 }
 
 void BytecodeGraphBuilder::BuildJumpIfFalse() {
-  BuildJumpIfNot(environment()->LookupAccumulator());
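+  // The branch tests the accumulator directly, so its value is known on each
+  // outgoing path; re-bind the matching boolean constant to refine it.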
+  NewBranch(environment()->LookupAccumulator());
+  Environment* if_true_environment = environment()->Copy();
+  environment()->BindAccumulator(jsgraph()->FalseConstant());
+  NewIfFalse();
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+  if_true_environment->BindAccumulator(jsgraph()->TrueConstant());
+  set_environment(if_true_environment);
+  NewIfTrue();
 }
 
 void BytecodeGraphBuilder::BuildJumpIfTrue() {
-  BuildJumpIf(environment()->LookupAccumulator());
+  NewBranch(environment()->LookupAccumulator());
+  Environment* if_false_environment = environment()->Copy();
+  environment()->BindAccumulator(jsgraph()->TrueConstant());
+  NewIfTrue();
+  MergeIntoSuccessorEnvironment(bytecode_iterator().GetJumpTargetOffset());
+  if_false_environment->BindAccumulator(jsgraph()->FalseConstant());
+  set_environment(if_false_environment);
+  NewIfFalse();
 }
 
 void BytecodeGraphBuilder::BuildJumpIfToBooleanTrue() {
@@ -2000,12 +2202,17 @@
 
 void BytecodeGraphBuilder::BuildJumpIfNotHole() {
   Node* accumulator = environment()->LookupAccumulator();
-  Node* condition =
-      NewNode(javascript()->StrictEqual(CompareOperationHint::kAny),
-              accumulator, jsgraph()->TheHoleConstant());
+  Node* condition = NewNode(simplified()->ReferenceEqual(), accumulator,
+                            jsgraph()->TheHoleConstant());
   BuildJumpIfNot(condition);
 }
 
+void BytecodeGraphBuilder::BuildJumpIfJSReceiver() {
+  Node* accumulator = environment()->LookupAccumulator();
+  Node* condition = NewNode(simplified()->ObjectIsReceiver(), accumulator);
+  BuildJumpIf(condition);
+}
+
 Node** BytecodeGraphBuilder::EnsureInputBufferSize(int size) {
   if (size > input_buffer_size_) {
     size = size + kInputBufferSizeIncrement + input_buffer_size_;
@@ -2093,7 +2300,7 @@
       int handler_offset = exception_handlers_.top().handler_offset_;
       int context_index = exception_handlers_.top().context_register_;
       interpreter::Register context_register(context_index);
-      Environment* success_env = environment()->CopyForConditional();
+      Environment* success_env = environment()->Copy();
       const Operator* op = common()->IfException();
       Node* effect = environment()->GetEffectDependency();
       Node* on_exception = graph()->NewNode(op, effect, result);
@@ -2111,6 +2318,10 @@
       Node* on_success = graph()->NewNode(if_success, result);
       environment()->UpdateControlDependency(on_success);
     }
+    // Ensure checkpoints are created after operations with side-effects.
+    if (has_effect && !result->op()->HasProperty(Operator::kNoWrite)) {
+      mark_as_needing_eager_checkpoint(true);
+    }
   }
 
   return result;
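
The added lines at the end of MakeNode pair with the needs_eager_checkpoint_ flag introduced in the header below: a checkpoint stays valid until some operation that may write observable state invalidates it, so only the first deopt point after such an operation pays for a new checkpoint. A standalone sketch of that elision scheme, with hypothetical names:

#include <cstdio>

static bool needs_eager_checkpoint = true;  // Hypothetical builder state.

void PrepareEagerCheckpoint() {
  if (needs_eager_checkpoint) {
    std::printf("emit Checkpoint node\n");
    needs_eager_checkpoint = false;  // A dominating checkpoint now exists.
  }
}

void EmitOp(bool may_write) {
  PrepareEagerCheckpoint();
  std::printf("emit op\n");
  // Mirrors MakeNode above: an op without the kNoWrite property
  // invalidates the current checkpoint.
  if (may_write) needs_eager_checkpoint = true;
}

int main() {
  EmitOp(false);  // checkpoint + op
  EmitOp(false);  // op only; still dominated by the first checkpoint
  EmitOp(true);   // effectful op; invalidates the checkpoint
  EmitOp(false);  // checkpoint + op
  return 0;
}
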
diff --git a/src/compiler/bytecode-graph-builder.h b/src/compiler/bytecode-graph-builder.h
index 6994226..41fcf68 100644
--- a/src/compiler/bytecode-graph-builder.h
+++ b/src/compiler/bytecode-graph-builder.h
@@ -5,12 +5,10 @@
 #ifndef V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 #define V8_COMPILER_BYTECODE_GRAPH_BUILDER_H_
 
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/compiler/bytecode-loop-analysis.h"
+#include "src/compiler/bytecode-analysis.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/liveness-analyzer.h"
 #include "src/compiler/state-values-utils.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/interpreter/bytecode-array-iterator.h"
 #include "src/interpreter/bytecode-flags.h"
 #include "src/interpreter/bytecodes.h"
@@ -18,9 +16,6 @@
 
 namespace v8 {
 namespace internal {
-
-class CompilationInfo;
-
 namespace compiler {
 
 class SourcePositionTable;
@@ -29,8 +24,10 @@
 // interpreter bytecodes.
 class BytecodeGraphBuilder {
  public:
-  BytecodeGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                       JSGraph* jsgraph, float invocation_frequency,
+  BytecodeGraphBuilder(Zone* local_zone, Handle<SharedFunctionInfo> shared,
+                       Handle<FeedbackVector> feedback_vector,
+                       BailoutId osr_ast_id, JSGraph* jsgraph,
+                       float invocation_frequency,
                        SourcePositionTable* source_positions,
                        int inlining_id = SourcePosition::kNotInlined);
 
@@ -114,9 +111,14 @@
 
   Node* ProcessCallArguments(const Operator* call_op, Node* callee,
                              interpreter::Register receiver, size_t arity);
-  Node* ProcessCallNewArguments(const Operator* call_new_op, Node* callee,
-                                Node* new_target,
-                                interpreter::Register first_arg, size_t arity);
+  Node* ProcessConstructArguments(const Operator* call_new_op, Node* callee,
+                                  Node* new_target,
+                                  interpreter::Register first_arg,
+                                  size_t arity);
+  Node* ProcessConstructWithSpreadArguments(const Operator* op, Node* callee,
+                                            Node* new_target,
+                                            interpreter::Register first_arg,
+                                            size_t arity);
   Node* ProcessCallRuntimeArguments(const Operator* call_runtime_op,
                                     interpreter::Register first_arg,
                                     size_t arity);
@@ -131,14 +133,18 @@
   // Conceptually this frame state is "after" a given operation.
   void PrepareFrameState(Node* node, OutputFrameStateCombine combine);
 
-  // Computes register liveness and replaces dead ones in frame states with the
-  // undefined values.
-  void ClearNonLiveSlotsInFrameStates();
-
   void BuildCreateArguments(CreateArgumentsType type);
-  Node* BuildLoadGlobal(uint32_t feedback_slot_index, TypeofMode typeof_mode);
+  Node* BuildLoadGlobal(Handle<Name> name, uint32_t feedback_slot_index,
+                        TypeofMode typeof_mode);
   void BuildStoreGlobal(LanguageMode language_mode);
-  void BuildNamedStore(LanguageMode language_mode);
+
+  enum class StoreMode {
+    // Check the prototype chain before storing.
+    kNormal,
+    // Store value to the receiver without checking the prototype chain.
+    kOwn,
+  };
+  void BuildNamedStore(LanguageMode language_mode, StoreMode store_mode);
   void BuildKeyedStore(LanguageMode language_mode);
   void BuildLdaLookupSlot(TypeofMode typeof_mode);
   void BuildLdaLookupContextSlot(TypeofMode typeof_mode);
@@ -146,7 +152,6 @@
   void BuildStaLookupSlot(LanguageMode language_mode);
   void BuildCall(TailCallMode tail_call_mode,
                  ConvertReceiverMode receiver_hint);
-  void BuildThrow();
   void BuildBinaryOp(const Operator* op);
   void BuildBinaryOpWithImmediate(const Operator* op);
   void BuildCompareOp(const Operator* op);
@@ -156,6 +161,13 @@
   void BuildForInNext();
   void BuildInvokeIntrinsic();
 
+  // Optional early lowering to the simplified operator level. Returns the node
+  // representing the lowered operation or {nullptr} if no lowering is
+  // available.
+  // Note that the result has already been wired into the environment just like
+  // any other invocation of {NewNode} would do.
+  Node* TryBuildSimplifiedBinaryOp(const Operator* op, Node* left, Node* right,
+                                   FeedbackSlot slot);
+
   // Check the context chain for extensions, for lookup fast paths.
   Environment* CheckContextExtensions(uint32_t depth);
 
@@ -181,6 +193,7 @@
   void BuildJumpIfToBooleanTrue();
   void BuildJumpIfToBooleanFalse();
   void BuildJumpIfNotHole();
+  void BuildJumpIfJSReceiver();
 
   // Simulates control flow by forward-propagating environments.
   void MergeIntoSuccessorEnvironment(int target_offset);
@@ -203,6 +216,10 @@
   // Simulates entry and exit of exception handlers.
   void EnterAndExitExceptionHandlers(int current_offset);
 
+  // Update the current position of the {SourcePositionTable} to that of the
+  // bytecode at {offset}, if any.
+  void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
+
   // Growth increment for the temporary buffer used to construct input lists to
   // new nodes.
   static const int kInputBufferSizeIncrement = 64;
@@ -224,6 +241,9 @@
   Zone* graph_zone() const { return graph()->zone(); }
   JSGraph* jsgraph() const { return jsgraph_; }
   JSOperatorBuilder* javascript() const { return jsgraph_->javascript(); }
+  SimplifiedOperatorBuilder* simplified() const {
+    return jsgraph_->simplified();
+  }
   Zone* local_zone() const { return local_zone_; }
   const Handle<BytecodeArray>& bytecode_array() const {
     return bytecode_array_;
@@ -231,7 +251,7 @@
   const Handle<HandlerTable>& exception_handler_table() const {
     return exception_handler_table_;
   }
-  const Handle<TypeFeedbackVector>& feedback_vector() const {
+  const Handle<FeedbackVector>& feedback_vector() const {
     return feedback_vector_;
   }
   const FrameStateFunctionInfo* frame_state_function_info() const {
@@ -247,24 +267,17 @@
     bytecode_iterator_ = bytecode_iterator;
   }
 
-  const BytecodeBranchAnalysis* branch_analysis() const {
-    return branch_analysis_;
+  const BytecodeAnalysis* bytecode_analysis() const {
+    return bytecode_analysis_;
   }
 
-  void set_branch_analysis(const BytecodeBranchAnalysis* branch_analysis) {
-    branch_analysis_ = branch_analysis;
+  void set_bytecode_analysis(const BytecodeAnalysis* bytecode_analysis) {
+    bytecode_analysis_ = bytecode_analysis;
   }
 
-  const BytecodeLoopAnalysis* loop_analysis() const { return loop_analysis_; }
-
-  void set_loop_analysis(const BytecodeLoopAnalysis* loop_analysis) {
-    loop_analysis_ = loop_analysis;
-  }
-
-  LivenessAnalyzer* liveness_analyzer() { return &liveness_analyzer_; }
-
-  bool IsLivenessAnalysisEnabled() const {
-    return this->is_liveness_analysis_enabled_;
+  bool needs_eager_checkpoint() const { return needs_eager_checkpoint_; }
+  void mark_as_needing_eager_checkpoint(bool value) {
+    needs_eager_checkpoint_ = value;
   }
 
 #define DECLARE_VISIT_BYTECODE(name, ...) void Visit##name();
@@ -276,13 +289,13 @@
   float const invocation_frequency_;
   Handle<BytecodeArray> bytecode_array_;
   Handle<HandlerTable> exception_handler_table_;
-  Handle<TypeFeedbackVector> feedback_vector_;
+  Handle<FeedbackVector> feedback_vector_;
   const FrameStateFunctionInfo* frame_state_function_info_;
   const interpreter::BytecodeArrayIterator* bytecode_iterator_;
-  const BytecodeBranchAnalysis* branch_analysis_;
-  const BytecodeLoopAnalysis* loop_analysis_;
+  const BytecodeAnalysis* bytecode_analysis_;
   Environment* environment_;
   BailoutId osr_ast_id_;
+  int osr_loop_offset_;
 
   // Merge environments are snapshots of the environment at points where the
   // control flow merges. This models a forward data flow propagation of all
@@ -297,6 +310,11 @@
   int input_buffer_size_;
   Node** input_buffer_;
 
+  // Optimization: only create checkpoints when the current position in the
+  // control flow is not already effect-dominated by another checkpoint. All
+  // operations without observable side-effects can be re-evaluated.
+  bool needs_eager_checkpoint_;
+
   // Nodes representing values in the activation record.
   SetOncePointer<Node> function_context_;
   SetOncePointer<Node> function_closure_;
@@ -305,22 +323,13 @@
   // Control nodes that exit the function body.
   ZoneVector<Node*> exit_controls_;
 
-  bool const is_liveness_analysis_enabled_;
-
   StateValuesCache state_values_cache_;
 
-  // Analyzer of register liveness.
-  LivenessAnalyzer liveness_analyzer_;
-
-  // The Turbofan source position table, to be populated.
+  // The source position table, to be populated.
   SourcePositionTable* source_positions_;
 
   SourcePosition const start_position_;
 
-  // Update [source_positions_]'s current position to that of the bytecode at
-  // [offset], if any.
-  void UpdateCurrentSourcePosition(SourcePositionTableIterator* it, int offset);
-
   static int const kBinaryOperationHintIndex = 1;
   static int const kCountOperationHintIndex = 0;
   static int const kBinaryOperationSmiHintIndex = 2;
diff --git a/src/compiler/bytecode-liveness-map.cc b/src/compiler/bytecode-liveness-map.cc
new file mode 100644
index 0000000..ba98dec
--- /dev/null
+++ b/src/compiler/bytecode-liveness-map.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/bytecode-liveness-map.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+BytecodeLiveness::BytecodeLiveness(int register_count, Zone* zone)
+    : in(new (zone) BytecodeLivenessState(register_count, zone)),
+      out(new (zone) BytecodeLivenessState(register_count, zone)) {}
+
+BytecodeLivenessMap::BytecodeLivenessMap(int bytecode_size, Zone* zone)
+    : liveness_map_(base::bits::RoundUpToPowerOfTwo32(bytecode_size / 4 + 1),
+                    base::KeyEqualityMatcher<int>(),
+                    ZoneAllocationPolicy(zone)) {}
+
+uint32_t OffsetHash(int offset) { return offset; }
+
+BytecodeLiveness& BytecodeLivenessMap::InitializeLiveness(int offset,
+                                                          int register_count,
+                                                          Zone* zone) {
+  return liveness_map_
+      .LookupOrInsert(offset, OffsetHash(offset),
+                      [&]() { return BytecodeLiveness(register_count, zone); },
+                      ZoneAllocationPolicy(zone))
+      ->value;
+}
+
+BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) {
+  return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+const BytecodeLiveness& BytecodeLivenessMap::GetLiveness(int offset) const {
+  return liveness_map_.Lookup(offset, OffsetHash(offset))->value;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/bytecode-liveness-map.h b/src/compiler/bytecode-liveness-map.h
new file mode 100644
index 0000000..03251f1
--- /dev/null
+++ b/src/compiler/bytecode-liveness-map.h
@@ -0,0 +1,119 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+#define V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
+
+#include "src/base/hashmap.h"
+#include "src/bit-vector.h"
+#include "src/zone/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class BytecodeLivenessState : public ZoneObject {
+ public:
+  BytecodeLivenessState(int register_count, Zone* zone)
+      : bit_vector_(register_count + 1, zone) {}
+
+  const BitVector& bit_vector() const { return bit_vector_; }
+
+  BitVector& bit_vector() { return bit_vector_; }
+
+  bool RegisterIsLive(int index) const {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    return bit_vector_.Contains(index);
+  }
+
+  bool AccumulatorIsLive() const {
+    return bit_vector_.Contains(bit_vector_.length() - 1);
+  }
+
+  bool Equals(const BytecodeLivenessState& other) const {
+    return bit_vector_.Equals(other.bit_vector_);
+  }
+
+  void MarkRegisterLive(int index) {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    bit_vector_.Add(index);
+  }
+
+  void MarkRegisterDead(int index) {
+    DCHECK_GE(index, 0);
+    DCHECK_LT(index, bit_vector_.length() - 1);
+    bit_vector_.Remove(index);
+  }
+
+  void MarkAccumulatorLive() { bit_vector_.Add(bit_vector_.length() - 1); }
+
+  void MarkAccumulatorDead() { bit_vector_.Remove(bit_vector_.length() - 1); }
+
+  void MarkAllLive() { bit_vector_.AddAll(); }
+
+  void Union(const BytecodeLivenessState& other) {
+    bit_vector_.Union(other.bit_vector_);
+  }
+
+  bool UnionIsChanged(const BytecodeLivenessState& other) {
+    return bit_vector_.UnionIsChanged(other.bit_vector_);
+  }
+
+  void CopyFrom(const BytecodeLivenessState& other) {
+    bit_vector_.CopyFrom(other.bit_vector_);
+  }
+
+ private:
+  BitVector bit_vector_;
+
+  DISALLOW_COPY_AND_ASSIGN(BytecodeLivenessState);
+};
+
+struct BytecodeLiveness {
+  BytecodeLivenessState* in;
+  BytecodeLivenessState* out;
+
+  BytecodeLiveness(int register_count, Zone* zone);
+};
+
+class V8_EXPORT_PRIVATE BytecodeLivenessMap {
+ public:
+  BytecodeLivenessMap(int size, Zone* zone);
+
+  BytecodeLiveness& InitializeLiveness(int offset, int register_count,
+                                       Zone* zone);
+
+  BytecodeLiveness& GetLiveness(int offset);
+  const BytecodeLiveness& GetLiveness(int offset) const;
+
+  BytecodeLivenessState* GetInLiveness(int offset) {
+    return GetLiveness(offset).in;
+  }
+  const BytecodeLivenessState* GetInLiveness(int offset) const {
+    return GetLiveness(offset).in;
+  }
+
+  BytecodeLivenessState* GetOutLiveness(int offset) {
+    return GetLiveness(offset).out;
+  }
+  const BytecodeLivenessState* GetOutLiveness(int offset) const {
+    return GetLiveness(offset).out;
+  }
+
+ private:
+  base::TemplateHashMapImpl<int, BytecodeLiveness,
+                            base::KeyEqualityMatcher<int>, ZoneAllocationPolicy>
+      liveness_map_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_BYTECODE_LIVENESS_MAP_H_
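
BytecodeLivenessState packs the registers and the accumulator into a single BitVector, with the accumulator stored as the final bit, and merges liveness across successors with Union. A standalone sketch of that layout and merge, using std::vector<bool> in place of V8's BitVector (LivenessState is a hypothetical stand-in):

#include <cassert>
#include <vector>

class LivenessState {
 public:
  explicit LivenessState(int register_count) : bits_(register_count + 1) {}
  void MarkRegisterLive(int i) { bits_[i] = true; }
  bool RegisterIsLive(int i) const { return bits_[i]; }
  void MarkAccumulatorLive() { bits_.back() = true; }  // Last bit.
  bool AccumulatorIsLive() const { return bits_.back(); }
  // Dataflow merge: live-out if live on any successor.
  void Union(const LivenessState& other) {
    for (size_t i = 0; i < bits_.size(); ++i)
      bits_[i] = bits_[i] || other.bits_[i];
  }

 private:
  std::vector<bool> bits_;
};

int main() {
  LivenessState a(3), b(3);
  a.MarkRegisterLive(0);
  b.MarkAccumulatorLive();
  a.Union(b);
  assert(a.RegisterIsLive(0) && a.AccumulatorIsLive());
  assert(!a.RegisterIsLive(1));
  return 0;
}
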
diff --git a/src/compiler/bytecode-loop-analysis.cc b/src/compiler/bytecode-loop-analysis.cc
deleted file mode 100644
index 03c11f7..0000000
--- a/src/compiler/bytecode-loop-analysis.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/bytecode-loop-analysis.h"
-
-#include "src/compiler/bytecode-branch-analysis.h"
-#include "src/interpreter/bytecode-array-iterator.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-BytecodeLoopAnalysis::BytecodeLoopAnalysis(
-    Handle<BytecodeArray> bytecode_array,
-    const BytecodeBranchAnalysis* branch_analysis, Zone* zone)
-    : bytecode_array_(bytecode_array),
-      branch_analysis_(branch_analysis),
-      zone_(zone),
-      current_loop_offset_(-1),
-      found_current_backedge_(false),
-      backedge_to_header_(zone),
-      loop_header_to_parent_(zone) {}
-
-void BytecodeLoopAnalysis::Analyze() {
-  current_loop_offset_ = -1;
-  found_current_backedge_ = false;
-  interpreter::BytecodeArrayIterator iterator(bytecode_array());
-  while (!iterator.done()) {
-    interpreter::Bytecode bytecode = iterator.current_bytecode();
-    int current_offset = iterator.current_offset();
-    if (branch_analysis_->backward_branches_target(current_offset)) {
-      AddLoopEntry(current_offset);
-    } else if (interpreter::Bytecodes::IsJump(bytecode)) {
-      AddBranch(current_offset, iterator.GetJumpTargetOffset());
-    }
-    iterator.Advance();
-  }
-}
-
-void BytecodeLoopAnalysis::AddLoopEntry(int entry_offset) {
-  if (found_current_backedge_) {
-    // We assume that all backedges of a loop must occur together and before
-    // another loop entry or an outer loop backedge.
-    // This is guaranteed by the invariants from AddBranch, such that every
-    // backedge must either go to the current loop or be the first of the
-    // backedges to the parent loop.
-    // Thus here, the current loop actually ended before and we have a loop
-    // with the same parent.
-    current_loop_offset_ = loop_header_to_parent_[current_loop_offset_];
-    found_current_backedge_ = false;
-  }
-  loop_header_to_parent_[entry_offset] = current_loop_offset_;
-  current_loop_offset_ = entry_offset;
-}
-
-void BytecodeLoopAnalysis::AddBranch(int origin_offset, int target_offset) {
-  // If this is a backedge, record it.
-  if (target_offset < origin_offset) {
-    backedge_to_header_[origin_offset] = target_offset;
-    // Check whether this is actually a backedge of the outer loop and we have
-    // already finished the current loop.
-    if (target_offset < current_loop_offset_) {
-      DCHECK(found_current_backedge_);
-      int parent_offset = loop_header_to_parent_[current_loop_offset_];
-      DCHECK_EQ(target_offset, parent_offset);
-      current_loop_offset_ = parent_offset;
-    } else {
-      DCHECK_EQ(target_offset, current_loop_offset_);
-      found_current_backedge_ = true;
-    }
-  }
-}
-
-int BytecodeLoopAnalysis::GetLoopOffsetFor(int offset) const {
-  auto next_backedge = backedge_to_header_.lower_bound(offset);
-  // If there is no next backedge => offset is not in a loop.
-  if (next_backedge == backedge_to_header_.end()) {
-    return -1;
-  }
-  // If the header precedes the offset, it is the backedge of the containing
-  // loop.
-  if (next_backedge->second <= offset) {
-    return next_backedge->second;
-  }
-  // Otherwise there is a nested loop after this offset. We just return the
-  // parent of the next nested loop.
-  return loop_header_to_parent_.upper_bound(offset)->second;
-}
-
-int BytecodeLoopAnalysis::GetParentLoopFor(int header_offset) const {
-  auto parent = loop_header_to_parent_.find(header_offset);
-  DCHECK(parent != loop_header_to_parent_.end());
-  return parent->second;
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/bytecode-loop-analysis.h b/src/compiler/bytecode-loop-analysis.h
deleted file mode 100644
index 1a86d7b..0000000
--- a/src/compiler/bytecode-loop-analysis.h
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-#define V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
-
-#include "src/handles.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-
-class BytecodeArray;
-
-namespace compiler {
-
-class BytecodeBranchAnalysis;
-
-class BytecodeLoopAnalysis BASE_EMBEDDED {
- public:
-  BytecodeLoopAnalysis(Handle<BytecodeArray> bytecode_array,
-                       const BytecodeBranchAnalysis* branch_analysis,
-                       Zone* zone);
-
-  // Analyze the bytecodes to find the branch sites and their
-  // targets. No other methods in this class return valid information
-  // until this has been called.
-  void Analyze();
-
-  // Get the loop header offset of the containing loop for arbitrary
-  // {offset}, or -1 if the {offset} is not inside any loop.
-  int GetLoopOffsetFor(int offset) const;
-  // Gets the loop header offset of the parent loop of the loop header
-  // at {header_offset}, or -1 for outer-most loops.
-  int GetParentLoopFor(int header_offset) const;
-
- private:
-  void AddLoopEntry(int entry_offset);
-  void AddBranch(int origin_offset, int target_offset);
-
-  Zone* zone() const { return zone_; }
-  Handle<BytecodeArray> bytecode_array() const { return bytecode_array_; }
-
-  Handle<BytecodeArray> bytecode_array_;
-  const BytecodeBranchAnalysis* branch_analysis_;
-  Zone* zone_;
-
-  int current_loop_offset_;
-  bool found_current_backedge_;
-
-  // Map from the offset of a backedge jump to the offset of the corresponding
-  // loop header. There might be multiple backedges for do-while loops.
-  ZoneMap<int, int> backedge_to_header_;
-  // Map from the offset of a loop header to the offset of its parent's loop
-  // header. This map will have as many entries as there are loops in the
-  // function.
-  ZoneMap<int, int> loop_header_to_parent_;
-
-  DISALLOW_COPY_AND_ASSIGN(BytecodeLoopAnalysis);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_BYTECODE_LOOP_ANALYSIS_H_
diff --git a/src/compiler/code-assembler.cc b/src/compiler/code-assembler.cc
index 3431098..1ace7da 100644
--- a/src/compiler/code-assembler.cc
+++ b/src/compiler/code-assembler.cc
@@ -19,18 +19,27 @@
 #include "src/interpreter/bytecodes.h"
 #include "src/machine-type.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/utils.h"
 #include "src/zone/zone.h"
 
+#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
+#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)
+#define REPEAT_1_TO_4(V, T) REPEAT_1_TO_3(V, T) V(T, T, T, T)
+#define REPEAT_1_TO_5(V, T) REPEAT_1_TO_4(V, T) V(T, T, T, T, T)
+#define REPEAT_1_TO_6(V, T) REPEAT_1_TO_5(V, T) V(T, T, T, T, T, T)
+#define REPEAT_1_TO_7(V, T) REPEAT_1_TO_6(V, T) V(T, T, T, T, T, T, T)
+#define REPEAT_1_TO_8(V, T) REPEAT_1_TO_7(V, T) V(T, T, T, T, T, T, T, T)
+#define REPEAT_1_TO_9(V, T) REPEAT_1_TO_8(V, T) V(T, T, T, T, T, T, T, T, T)
+
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
-                             const CallInterfaceDescriptor& descriptor,
-                             Code::Flags flags, const char* name,
-                             size_t result_size)
-    : CodeAssembler(
+CodeAssemblerState::CodeAssemblerState(
+    Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
+    Code::Flags flags, const char* name, size_t result_size)
+    : CodeAssemblerState(
           isolate, zone,
           Linkage::GetStubCallDescriptor(
               isolate, zone, descriptor, descriptor.GetStackParameterCount(),
@@ -38,19 +47,20 @@
               MachineType::AnyTagged(), result_size),
           flags, name) {}
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                             Code::Flags flags, const char* name)
-    : CodeAssembler(isolate, zone,
-                    Linkage::GetJSCallDescriptor(
-                        zone, false, parameter_count,
-                        Code::ExtractKindFromFlags(flags) == Code::BUILTIN
-                            ? CallDescriptor::kPushArgumentCount
-                            : CallDescriptor::kNoFlags),
-                    flags, name) {}
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+                                       int parameter_count, Code::Flags flags,
+                                       const char* name)
+    : CodeAssemblerState(isolate, zone,
+                         Linkage::GetJSCallDescriptor(
+                             zone, false, parameter_count,
+                             Code::ExtractKindFromFlags(flags) == Code::BUILTIN
+                                 ? CallDescriptor::kPushArgumentCount
+                                 : CallDescriptor::kNoFlags),
+                         flags, name) {}
 
-CodeAssembler::CodeAssembler(Isolate* isolate, Zone* zone,
-                             CallDescriptor* call_descriptor, Code::Flags flags,
-                             const char* name)
+CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
+                                       CallDescriptor* call_descriptor,
+                                       Code::Flags flags, const char* name)
     : raw_assembler_(new RawMachineAssembler(
           isolate, new (zone) Graph(zone), call_descriptor,
           MachineType::PointerRepresentation(),
@@ -61,56 +71,109 @@
       code_generated_(false),
       variables_(zone) {}
 
+CodeAssemblerState::~CodeAssemblerState() {}
+
+int CodeAssemblerState::parameter_count() const {
+  return static_cast<int>(raw_assembler_->call_descriptor()->ParameterCount());
+}
+
 CodeAssembler::~CodeAssembler() {}
 
-void CodeAssembler::CallPrologue() {}
+class BreakOnNodeDecorator final : public GraphDecorator {
+ public:
+  explicit BreakOnNodeDecorator(NodeId node_id) : node_id_(node_id) {}
 
-void CodeAssembler::CallEpilogue() {}
+  void Decorate(Node* node) final {
+    if (node->id() == node_id_) {
+      base::OS::DebugBreak();
+    }
+  }
 
-Handle<Code> CodeAssembler::GenerateCode() {
-  DCHECK(!code_generated_);
+ private:
+  NodeId node_id_;
+};
 
-  Schedule* schedule = raw_assembler_->Export();
+void CodeAssembler::BreakOnNode(int node_id) {
+  Graph* graph = raw_assembler()->graph();
+  Zone* zone = graph->zone();
+  GraphDecorator* decorator =
+      new (zone) BreakOnNodeDecorator(static_cast<NodeId>(node_id));
+  graph->AddDecorator(decorator);
+}
+
+void CodeAssembler::RegisterCallGenerationCallbacks(
+    const CodeAssemblerCallback& call_prologue,
+    const CodeAssemblerCallback& call_epilogue) {
+  // The callbacks can be registered only once.
+  DCHECK(!state_->call_prologue_);
+  DCHECK(!state_->call_epilogue_);
+  state_->call_prologue_ = call_prologue;
+  state_->call_epilogue_ = call_epilogue;
+}
+
+void CodeAssembler::UnregisterCallGenerationCallbacks() {
+  state_->call_prologue_ = nullptr;
+  state_->call_epilogue_ = nullptr;
+}
+
+void CodeAssembler::CallPrologue() {
+  if (state_->call_prologue_) {
+    state_->call_prologue_();
+  }
+}
+
+void CodeAssembler::CallEpilogue() {
+  if (state_->call_epilogue_) {
+    state_->call_epilogue_();
+  }
+}
+
+// static
+Handle<Code> CodeAssembler::GenerateCode(CodeAssemblerState* state) {
+  DCHECK(!state->code_generated_);
+
+  RawMachineAssembler* rasm = state->raw_assembler_.get();
+  Schedule* schedule = rasm->Export();
   Handle<Code> code = Pipeline::GenerateCodeForCodeStub(
-      isolate(), raw_assembler_->call_descriptor(), raw_assembler_->graph(),
-      schedule, flags_, name_);
+      rasm->isolate(), rasm->call_descriptor(), rasm->graph(), schedule,
+      state->flags_, state->name_);
 
-  code_generated_ = true;
+  state->code_generated_ = true;
   return code;
 }
 
-bool CodeAssembler::Is64() const { return raw_assembler_->machine()->Is64(); }
+bool CodeAssembler::Is64() const { return raw_assembler()->machine()->Is64(); }
 
 bool CodeAssembler::IsFloat64RoundUpSupported() const {
-  return raw_assembler_->machine()->Float64RoundUp().IsSupported();
+  return raw_assembler()->machine()->Float64RoundUp().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundDownSupported() const {
-  return raw_assembler_->machine()->Float64RoundDown().IsSupported();
+  return raw_assembler()->machine()->Float64RoundDown().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundTiesEvenSupported() const {
-  return raw_assembler_->machine()->Float64RoundTiesEven().IsSupported();
+  return raw_assembler()->machine()->Float64RoundTiesEven().IsSupported();
 }
 
 bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
-  return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
+  return raw_assembler()->machine()->Float64RoundTruncate().IsSupported();
 }
 
 Node* CodeAssembler::Int32Constant(int32_t value) {
-  return raw_assembler_->Int32Constant(value);
+  return raw_assembler()->Int32Constant(value);
 }
 
 Node* CodeAssembler::Int64Constant(int64_t value) {
-  return raw_assembler_->Int64Constant(value);
+  return raw_assembler()->Int64Constant(value);
 }
 
 Node* CodeAssembler::IntPtrConstant(intptr_t value) {
-  return raw_assembler_->IntPtrConstant(value);
+  return raw_assembler()->IntPtrConstant(value);
 }
 
 Node* CodeAssembler::NumberConstant(double value) {
-  return raw_assembler_->NumberConstant(value);
+  return raw_assembler()->NumberConstant(value);
 }
 
 Node* CodeAssembler::SmiConstant(Smi* value) {
@@ -122,19 +185,23 @@
 }
 
 Node* CodeAssembler::HeapConstant(Handle<HeapObject> object) {
-  return raw_assembler_->HeapConstant(object);
+  return raw_assembler()->HeapConstant(object);
+}
+
+Node* CodeAssembler::CStringConstant(const char* str) {
+  return HeapConstant(factory()->NewStringFromAsciiChecked(str, TENURED));
 }
 
 Node* CodeAssembler::BooleanConstant(bool value) {
-  return raw_assembler_->BooleanConstant(value);
+  return raw_assembler()->BooleanConstant(value);
 }
 
 Node* CodeAssembler::ExternalConstant(ExternalReference address) {
-  return raw_assembler_->ExternalConstant(address);
+  return raw_assembler()->ExternalConstant(address);
 }
 
 Node* CodeAssembler::Float64Constant(double value) {
-  return raw_assembler_->Float64Constant(value);
+  return raw_assembler()->Float64Constant(value);
 }
 
 Node* CodeAssembler::NaNConstant() {
@@ -174,24 +241,48 @@
 }
 
 bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+  if (node->opcode() == IrOpcode::kBitcastWordToTaggedSigned ||
+      node->opcode() == IrOpcode::kBitcastWordToTagged) {
+    node = node->InputAt(0);
+  }
   IntPtrMatcher m(node);
   if (m.HasValue()) out_value = m.Value();
   return m.HasValue();
 }
 
 Node* CodeAssembler::Parameter(int value) {
-  return raw_assembler_->Parameter(value);
+  return raw_assembler()->Parameter(value);
+}
+
+Node* CodeAssembler::GetJSContextParameter() {
+  CallDescriptor* desc = raw_assembler()->call_descriptor();
+  DCHECK(desc->IsJSFunctionCall());
+  return Parameter(Linkage::GetJSCallContextParamIndex(
+      static_cast<int>(desc->JSParameterCount())));
 }
 
 void CodeAssembler::Return(Node* value) {
-  return raw_assembler_->Return(value);
+  return raw_assembler()->Return(value);
+}
+
+void CodeAssembler::Return(Node* value1, Node* value2) {
+  return raw_assembler()->Return(value1, value2);
+}
+
+void CodeAssembler::Return(Node* value1, Node* value2, Node* value3) {
+  return raw_assembler()->Return(value1, value2, value3);
 }
 
 void CodeAssembler::PopAndReturn(Node* pop, Node* value) {
-  return raw_assembler_->PopAndReturn(pop, value);
+  return raw_assembler()->PopAndReturn(pop, value);
 }
 
-void CodeAssembler::DebugBreak() { raw_assembler_->DebugBreak(); }
+void CodeAssembler::DebugBreak() { raw_assembler()->DebugBreak(); }
+
+void CodeAssembler::Unreachable() {
+  DebugBreak();
+  raw_assembler()->Unreachable();
+}
 
 void CodeAssembler::Comment(const char* format, ...) {
   if (!FLAG_code_comments) return;
@@ -210,81 +301,118 @@
   MemCopy(copy + prefix_len, builder.Finalize(), length);
   copy[0] = ';';
   copy[1] = ' ';
-  raw_assembler_->Comment(copy);
+  raw_assembler()->Comment(copy);
 }
 
-void CodeAssembler::Bind(CodeAssembler::Label* label) { return label->Bind(); }
+void CodeAssembler::Bind(Label* label) { return label->Bind(); }
 
 Node* CodeAssembler::LoadFramePointer() {
-  return raw_assembler_->LoadFramePointer();
+  return raw_assembler()->LoadFramePointer();
 }
 
 Node* CodeAssembler::LoadParentFramePointer() {
-  return raw_assembler_->LoadParentFramePointer();
+  return raw_assembler()->LoadParentFramePointer();
 }
 
 Node* CodeAssembler::LoadStackPointer() {
-  return raw_assembler_->LoadStackPointer();
+  return raw_assembler()->LoadStackPointer();
 }
 
 #define DEFINE_CODE_ASSEMBLER_BINARY_OP(name)   \
   Node* CodeAssembler::name(Node* a, Node* b) { \
-    return raw_assembler_->name(a, b);          \
+    return raw_assembler()->name(a, b);         \
   }
 CODE_ASSEMBLER_BINARY_OP_LIST(DEFINE_CODE_ASSEMBLER_BINARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_BINARY_OP
 
+Node* CodeAssembler::IntPtrAdd(Node* left, Node* right) {
+  intptr_t left_constant;
+  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  intptr_t right_constant;
+  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant + right_constant);
+    }
+    if (left_constant == 0) {
+      return right;
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return raw_assembler()->IntPtrAdd(left, right);
+}
+
+Node* CodeAssembler::IntPtrSub(Node* left, Node* right) {
+  intptr_t left_constant;
+  bool is_left_constant = ToIntPtrConstant(left, left_constant);
+  intptr_t right_constant;
+  bool is_right_constant = ToIntPtrConstant(right, right_constant);
+  if (is_left_constant) {
+    if (is_right_constant) {
+      return IntPtrConstant(left_constant - right_constant);
+    }
+  } else if (is_right_constant) {
+    if (right_constant == 0) {
+      return left;
+    }
+  }
+  return raw_assembler()->IntPtrSub(left, right);
+}
+
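
The new IntPtrAdd/IntPtrSub overloads fold at graph-building time when both operands are compile-time constants and strip additive identities, so no machine node is emitted for trivial arithmetic. A standalone sketch of those folding rules over a toy value type (Value and Add are hypothetical stand-ins for Node and the assembler methods):

#include <cassert>
#include <cstdint>

// A "node" is either a known constant or an unknown runtime value.
struct Value {
  bool is_constant;
  intptr_t constant;
};

Value Add(Value left, Value right) {
  if (left.is_constant && right.is_constant)
    return {true, left.constant + right.constant};   // Fold both constants.
  if (left.is_constant && left.constant == 0) return right;   // 0 + x == x
  if (right.is_constant && right.constant == 0) return left;  // x + 0 == x
  return {false, 0};  // Would emit a real IntPtrAdd node here.
}

int main() {
  assert(Add({true, 2}, {true, 3}).constant == 5);           // folded
  assert(Add({true, 0}, {false, 0}).is_constant == false);   // identity
  assert(Add({false, 0}, {false, 0}).is_constant == false);  // real add
  return 0;
}
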
 Node* CodeAssembler::WordShl(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->WordShl(value, IntPtrConstant(shift))
+  return (shift != 0) ? raw_assembler()->WordShl(value, IntPtrConstant(shift))
                       : value;
 }
 
 Node* CodeAssembler::WordShr(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->WordShr(value, IntPtrConstant(shift))
+  return (shift != 0) ? raw_assembler()->WordShr(value, IntPtrConstant(shift))
                       : value;
 }
 
 Node* CodeAssembler::Word32Shr(Node* value, int shift) {
-  return (shift != 0) ? raw_assembler_->Word32Shr(value, Int32Constant(shift))
+  return (shift != 0) ? raw_assembler()->Word32Shr(value, Int32Constant(shift))
                       : value;
 }
 
 Node* CodeAssembler::ChangeUint32ToWord(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    value = raw_assembler_->ChangeUint32ToUint64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    value = raw_assembler()->ChangeUint32ToUint64(value);
   }
   return value;
 }
 
 Node* CodeAssembler::ChangeInt32ToIntPtr(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    value = raw_assembler_->ChangeInt32ToInt64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    value = raw_assembler()->ChangeInt32ToInt64(value);
   }
   return value;
 }
 
 Node* CodeAssembler::RoundIntPtrToFloat64(Node* value) {
-  if (raw_assembler_->machine()->Is64()) {
-    return raw_assembler_->RoundInt64ToFloat64(value);
+  if (raw_assembler()->machine()->Is64()) {
+    return raw_assembler()->RoundInt64ToFloat64(value);
   }
-  return raw_assembler_->ChangeInt32ToFloat64(value);
+  return raw_assembler()->ChangeInt32ToFloat64(value);
 }
 
 #define DEFINE_CODE_ASSEMBLER_UNARY_OP(name) \
-  Node* CodeAssembler::name(Node* a) { return raw_assembler_->name(a); }
+  Node* CodeAssembler::name(Node* a) { return raw_assembler()->name(a); }
 CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
 #undef DEFINE_CODE_ASSEMBLER_UNARY_OP
 
 Node* CodeAssembler::Load(MachineType rep, Node* base) {
-  return raw_assembler_->Load(rep, base);
+  return raw_assembler()->Load(rep, base);
 }
 
-Node* CodeAssembler::Load(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->Load(rep, base, index);
+Node* CodeAssembler::Load(MachineType rep, Node* base, Node* offset) {
+  return raw_assembler()->Load(rep, base, offset);
 }
 
-Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* index) {
-  return raw_assembler_->AtomicLoad(rep, base, index);
+Node* CodeAssembler::AtomicLoad(MachineType rep, Node* base, Node* offset) {
+  return raw_assembler()->AtomicLoad(rep, base, offset);
 }
 
 Node* CodeAssembler::LoadRoot(Heap::RootListIndex root_index) {
@@ -303,28 +431,35 @@
               IntPtrConstant(root_index * kPointerSize));
 }
 
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* value) {
-  return raw_assembler_->Store(rep, base, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, value,
+                                kFullWriteBarrier);
 }
 
-Node* CodeAssembler::Store(MachineRepresentation rep, Node* base, Node* index,
-                           Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kFullWriteBarrier);
+Node* CodeAssembler::Store(Node* base, Node* offset, Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+                                value, kFullWriteBarrier);
+}
+
+Node* CodeAssembler::StoreWithMapWriteBarrier(Node* base, Node* offset,
+                                              Node* value) {
+  return raw_assembler()->Store(MachineRepresentation::kTagged, base, offset,
+                                value, kMapWriteBarrier);
 }
 
 Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
                                          Node* value) {
-  return raw_assembler_->Store(rep, base, value, kNoWriteBarrier);
+  return raw_assembler()->Store(rep, base, value, kNoWriteBarrier);
 }
 
 Node* CodeAssembler::StoreNoWriteBarrier(MachineRepresentation rep, Node* base,
-                                         Node* index, Node* value) {
-  return raw_assembler_->Store(rep, base, index, value, kNoWriteBarrier);
+                                         Node* offset, Node* value) {
+  return raw_assembler()->Store(rep, base, offset, value, kNoWriteBarrier);
 }
 
 Node* CodeAssembler::AtomicStore(MachineRepresentation rep, Node* base,
-                                 Node* index, Node* value) {
-  return raw_assembler_->AtomicStore(rep, base, index, value);
+                                 Node* offset, Node* value) {
+  return raw_assembler()->AtomicStore(rep, base, offset, value);
 }
 
 Node* CodeAssembler::StoreRoot(Heap::RootListIndex root_index, Node* value) {
@@ -336,11 +471,11 @@
 }
 
 Node* CodeAssembler::Retain(Node* value) {
-  return raw_assembler_->Retain(value);
+  return raw_assembler()->Retain(value);
 }
 
 Node* CodeAssembler::Projection(int index, Node* value) {
-  return raw_assembler_->Projection(index, value);
+  return raw_assembler()->Projection(index, value);
 }
 
 void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
@@ -350,11 +485,11 @@
   exception.MergeVariables();
   DCHECK(!node->op()->HasProperty(Operator::kNoThrow));
 
-  raw_assembler_->Continuations(node, success.label_, exception.label_);
+  raw_assembler()->Continuations(node, success.label_, exception.label_);
 
   Bind(&exception);
-  const Operator* op = raw_assembler_->common()->IfException();
-  Node* exception_value = raw_assembler_->AddNode(op, node, node);
+  const Operator* op = raw_assembler()->common()->IfException();
+  Node* exception_value = raw_assembler()->AddNode(op, node, node);
   if (exception_var != nullptr) {
     exception_var->Bind(exception_value);
   }
@@ -363,627 +498,161 @@
   Bind(&success);
 }
 
-Node* CodeAssembler::CallN(CallDescriptor* descriptor, Node* code_target,
-                           Node** args) {
+template <class... TArgs>
+Node* CodeAssembler::CallRuntime(Runtime::FunctionId function, Node* context,
+                                 TArgs... args) {
+  int argc = static_cast<int>(sizeof...(args));
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, argc, Operator::kNoProperties,
+      CallDescriptor::kNoFlags);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry =
+      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+  Node* arity = Int32Constant(argc);
+
+  Node* nodes[] = {centry, args..., ref, arity, context};
+
   CallPrologue();
-  Node* return_value = raw_assembler_->CallN(descriptor, code_target, args);
+  Node* return_value = raw_assembler()->CallN(desc, arraysize(nodes), nodes);
   CallEpilogue();
   return return_value;
 }
 
-Node* CodeAssembler::TailCallN(CallDescriptor* descriptor, Node* code_target,
-                               Node** args) {
-  return raw_assembler_->TailCallN(descriptor, code_target, args);
+// Instantiate CallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...)                                       \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallRuntime( \
+      Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
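
The pattern above combines two techniques: a parameter pack expanded into a fixed input array with extra slots spliced before and after it (as in {centry, args..., ref, arity, context}), and REPEAT_* macros that stamp out one explicit instantiation per supported arity so out-of-line callers link. A standalone sketch of both, with hypothetical names:

#include <cstddef>
#include <cstdio>

#define REPEAT_1_TO_2(V, T) V(T) V(T, T)
#define REPEAT_1_TO_3(V, T) REPEAT_1_TO_2(V, T) V(T, T, T)

template <class... TArgs>
size_t CountInputs(int context, TArgs... args) {
  // Pack expansion into a fixed array, with a prefixed target slot
  // and a suffixed context slot.
  int nodes[] = {-1 /* target */, args..., context};
  return sizeof(nodes) / sizeof(nodes[0]);
}

// Instantiate CountInputs() with up to 3 arguments.
#define INSTANTIATE(...) template size_t CountInputs(int, __VA_ARGS__);
REPEAT_1_TO_3(INSTANTIATE, int)
#undef INSTANTIATE

int main() {
  std::printf("%zu\n", CountInputs(0, 1, 2));  // 4: target, 1, 2, context
  return 0;
}
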
+
+template <class... TArgs>
+Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function,
+                                     Node* context, TArgs... args) {
+  int argc = static_cast<int>(sizeof...(args));
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
+      zone(), function, argc, Operator::kNoProperties,
+      CallDescriptor::kSupportsTailCalls);
+  int return_count = static_cast<int>(desc->ReturnCount());
+
+  Node* centry =
+      HeapConstant(CodeFactory::RuntimeCEntry(isolate(), return_count));
+  Node* ref = ExternalConstant(ExternalReference(function, isolate()));
+  Node* arity = Int32Constant(argc);
+
+  Node* nodes[] = {centry, args..., ref, arity, context};
+
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id,
-                                 Node* context) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime0(function_id, context);
-  CallEpilogue();
-  return return_value;
+// Instantiate TailCallRuntime() with up to 6 arguments.
+#define INSTANTIATE(...)                                           \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallRuntime( \
+      Runtime::FunctionId, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
+
+template <class... TArgs>
+Node* CodeAssembler::CallStubR(const CallInterfaceDescriptor& descriptor,
+                               size_t result_size, Node* target, Node* context,
+                               TArgs... args) {
+  Node* nodes[] = {target, args..., context};
+  return CallStubN(descriptor, result_size, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime1(function_id, arg1, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime2(function_id, arg1, arg2, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3) {
-  CallPrologue();
-  Node* return_value =
-      raw_assembler_->CallRuntime3(function_id, arg1, arg2, arg3, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3,
-                                 Node* arg4) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime4(function_id, arg1, arg2,
-                                                    arg3, arg4, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::CallRuntime(Runtime::FunctionId function_id, Node* context,
-                                 Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                                 Node* arg5) {
-  CallPrologue();
-  Node* return_value = raw_assembler_->CallRuntime5(function_id, arg1, arg2,
-                                                    arg3, arg4, arg5, context);
-  CallEpilogue();
-  return return_value;
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context) {
-  return raw_assembler_->TailCallRuntime0(function_id, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1) {
-  return raw_assembler_->TailCallRuntime1(function_id, arg1, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2) {
-  return raw_assembler_->TailCallRuntime2(function_id, arg1, arg2, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3) {
-  return raw_assembler_->TailCallRuntime3(function_id, arg1, arg2, arg3,
-                                          context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4) {
-  return raw_assembler_->TailCallRuntime4(function_id, arg1, arg2, arg3, arg4,
-                                          context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4, Node* arg5) {
-  return raw_assembler_->TailCallRuntime5(function_id, arg1, arg2, arg3, arg4,
-                                          arg5, context);
-}
-
-Node* CodeAssembler::TailCallRuntime(Runtime::FunctionId function_id,
-                                     Node* context, Node* arg1, Node* arg2,
-                                     Node* arg3, Node* arg4, Node* arg5,
-                                     Node* arg6) {
-  return raw_assembler_->TailCallRuntime6(function_id, arg1, arg2, arg3, arg4,
-                                          arg5, arg6, context);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2,
-                  result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, Node* arg3,
-                              size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                  result_size);
-}
-
-Node* CodeAssembler::CallStub(Callable const& callable, Node* context,
-                              Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                              size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                  arg4, result_size);
-}
-
-Node* CodeAssembler::CallStubN(Callable const& callable, Node** args,
-                               size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return CallStubN(callable.descriptor(), target, args, result_size);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(1);
-  args[0] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(2);
-  args[0] = arg1;
-  args[1] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, Node* arg4,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, Node* arg1,
-                              Node* arg2, Node* arg3, Node* arg4, Node* arg5,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = context;
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 3;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 4;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
-                              size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 5;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::CallStub(const CallInterfaceDescriptor& descriptor,
-                              Node* target, Node* context, const Arg& arg1,
-                              const Arg& arg2, const Arg& arg3, const Arg& arg4,
-                              const Arg& arg5, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kNoFlags, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 6;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[arg5.index] = arg5.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return CallN(call_descriptor, target, args);
-}
+// Instantiate CallStubR() with up to 6 arguments.
+#define INSTANTIATE(...)                                     \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::CallStubR( \
+      const CallInterfaceDescriptor& descriptor, size_t, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
 
 Node* CodeAssembler::CallStubN(const CallInterfaceDescriptor& descriptor,
-                               int js_parameter_count, Node* target,
-                               Node** args, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor,
-      descriptor.GetStackParameterCount() + js_parameter_count,
+                               size_t result_size, int input_count,
+                               Node* const* inputs) {
+  // 2 is for target and context.
+  DCHECK_LE(2, input_count);
+  int argc = input_count - 2;
+  DCHECK_LE(descriptor.GetParameterCount(), argc);
+  // Extra arguments not mentioned in the descriptor are passed on the stack.
+  int stack_parameter_count = argc - descriptor.GetRegisterParameterCount();
+  DCHECK_LE(descriptor.GetStackParameterCount(), stack_parameter_count);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), descriptor, stack_parameter_count,
       CallDescriptor::kNoFlags, Operator::kNoProperties,
       MachineType::AnyTagged(), result_size);
 
-  return CallN(call_descriptor, target, args);
+  CallPrologue();
+  Node* return_value = raw_assembler()->CallN(desc, input_count, inputs);
+  CallEpilogue();
+  return return_value;
 }
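
A hypothetical call site, to make the expected layout of |inputs| concrete
(the node names are placeholders; by analogy with TailCallStub below, the
target comes first and the context last):

    Node* inputs[] = {target, arg0, arg1, context};
    Node* result = CallStubN(descriptor, 1 /* result_size */,
                             arraysize(inputs), inputs);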
 
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  Node* arg4, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      arg4, result_size);
-}
-
-Node* CodeAssembler::TailCallStub(Callable const& callable, Node* context,
-                                  Node* arg1, Node* arg2, Node* arg3,
-                                  Node* arg4, Node* arg5, size_t result_size) {
-  Node* target = HeapConstant(callable.code());
-  return TailCallStub(callable.descriptor(), target, context, arg1, arg2, arg3,
-                      arg4, arg5, result_size);
-}
-
+template <class... TArgs>
 Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+                                  Node* target, Node* context, TArgs... args) {
+  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+  size_t result_size = 1;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
       CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
       MachineType::AnyTagged(), result_size);
 
-  Node** args = zone()->NewArray<Node*>(2);
-  args[0] = arg1;
-  args[1] = context;
+  Node* nodes[] = {target, args..., context};
 
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
+// Instantiate TailCallStub() with up to 6 arguments.
+#define INSTANTIATE(...)                                        \
+  template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallStub( \
+      const CallInterfaceDescriptor& descriptor, Node*, __VA_ARGS__);
+REPEAT_1_TO_7(INSTANTIATE, Node*)
+#undef INSTANTIATE
 
-  Node** args = zone()->NewArray<Node*>(3);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(4);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(5);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  Node* arg5, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(6);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, Node* arg1,
-                                  Node* arg2, Node* arg3, Node* arg4,
-                                  Node* arg5, Node* arg6, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  Node** args = zone()->NewArray<Node*>(7);
-  args[0] = arg1;
-  args[1] = arg2;
-  args[2] = arg3;
-  args[3] = arg4;
-  args[4] = arg5;
-  args[5] = arg6;
-  args[6] = context;
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, const Arg& arg1,
-                                  const Arg& arg2, const Arg& arg3,
-                                  const Arg& arg4, size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 5;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
-Node* CodeAssembler::TailCallStub(const CallInterfaceDescriptor& descriptor,
-                                  Node* target, Node* context, const Arg& arg1,
-                                  const Arg& arg2, const Arg& arg3,
-                                  const Arg& arg4, const Arg& arg5,
-                                  size_t result_size) {
-  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
-      isolate(), zone(), descriptor, descriptor.GetStackParameterCount(),
-      CallDescriptor::kSupportsTailCalls, Operator::kNoProperties,
-      MachineType::AnyTagged(), result_size);
-
-  const int kArgsCount = 6;
-  Node** args = zone()->NewArray<Node*>(kArgsCount);
-  DCHECK((std::fill(&args[0], &args[kArgsCount], nullptr), true));
-  args[arg1.index] = arg1.value;
-  args[arg2.index] = arg2.value;
-  args[arg3.index] = arg3.value;
-  args[arg4.index] = arg4.value;
-  args[arg5.index] = arg5.value;
-  args[kArgsCount - 1] = context;
-  DCHECK_EQ(0, std::count(&args[0], &args[kArgsCount], nullptr));
-
-  return raw_assembler_->TailCallN(call_descriptor, target, args);
-}
-
+template <class... TArgs>
 Node* CodeAssembler::TailCallBytecodeDispatch(
-    const CallInterfaceDescriptor& interface_descriptor,
-    Node* code_target_address, Node** args) {
-  CallDescriptor* descriptor = Linkage::GetBytecodeDispatchCallDescriptor(
-      isolate(), zone(), interface_descriptor,
-      interface_descriptor.GetStackParameterCount());
-  return raw_assembler_->TailCallN(descriptor, code_target_address, args);
+    const CallInterfaceDescriptor& descriptor, Node* target, TArgs... args) {
+  DCHECK_EQ(descriptor.GetParameterCount(), sizeof...(args));
+  CallDescriptor* desc = Linkage::GetBytecodeDispatchCallDescriptor(
+      isolate(), zone(), descriptor, descriptor.GetStackParameterCount());
+
+  Node* nodes[] = {target, args...};
+  return raw_assembler()->TailCallN(desc, arraysize(nodes), nodes);
 }
 
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver,
-                            size_t result_size) {
-  const int argc = 0;
-  Node* target = HeapConstant(callable.code());
+// Instantiate TailCallBytecodeDispatch() with 4 arguments.
+template V8_EXPORT_PRIVATE Node* CodeAssembler::TailCallBytecodeDispatch(
+    const CallInterfaceDescriptor& descriptor, Node* target, Node*, Node*,
+    Node*, Node*);
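
The four-argument instantiation presumably serves the interpreter's dispatch
path (accumulator, bytecode offset, bytecode array, dispatch table). A
hypothetical call site with placeholder nodes:

    TailCallBytecodeDispatch(InterpreterDispatchDescriptor(isolate()),
                             handler_entry, accumulator, bytecode_offset,
                             bytecode_array, dispatch_table);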
 
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            size_t result_size) {
-  const int argc = 1;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            Node* arg2, size_t result_size) {
-  const int argc = 2;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = arg2;
-  args[5] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
-}
-
-Node* CodeAssembler::CallJS(Callable const& callable, Node* context,
-                            Node* function, Node* receiver, Node* arg1,
-                            Node* arg2, Node* arg3, size_t result_size) {
-  const int argc = 3;
-  Node* target = HeapConstant(callable.code());
-
-  Node** args = zone()->NewArray<Node*>(argc + 4);
-  args[0] = function;
-  args[1] = Int32Constant(argc);
-  args[2] = receiver;
-  args[3] = arg1;
-  args[4] = arg2;
-  args[5] = arg3;
-  args[6] = context;
-
-  return CallStubN(callable.descriptor(), argc + 1, target, args, result_size);
+Node* CodeAssembler::CallCFunctionN(Signature<MachineType>* signature,
+                                    int input_count, Node* const* inputs) {
+  CallDescriptor* desc = Linkage::GetSimplifiedCDescriptor(zone(), signature);
+  return raw_assembler()->CallN(desc, input_count, inputs);
 }
 
 Node* CodeAssembler::CallCFunction2(MachineType return_type,
                                     MachineType arg0_type,
                                     MachineType arg1_type, Node* function,
                                     Node* arg0, Node* arg1) {
-  return raw_assembler_->CallCFunction2(return_type, arg0_type, arg1_type,
-                                        function, arg0, arg1);
+  return raw_assembler()->CallCFunction2(return_type, arg0_type, arg1_type,
+                                         function, arg0, arg1);
 }
 
-void CodeAssembler::Goto(CodeAssembler::Label* label) {
+Node* CodeAssembler::CallCFunction3(MachineType return_type,
+                                    MachineType arg0_type,
+                                    MachineType arg1_type,
+                                    MachineType arg2_type, Node* function,
+                                    Node* arg0, Node* arg1, Node* arg2) {
+  return raw_assembler()->CallCFunction3(return_type, arg0_type, arg1_type,
+                                         arg2_type, function, arg0, arg1, arg2);
+}
+
+void CodeAssembler::Goto(Label* label) {
   label->MergeVariables();
-  raw_assembler_->Goto(label->label_);
+  raw_assembler()->Goto(label->label_);
 }
 
 void CodeAssembler::GotoIf(Node* condition, Label* true_label) {
@@ -992,18 +661,18 @@
   Bind(&false_label);
 }
 
-void CodeAssembler::GotoUnless(Node* condition, Label* false_label) {
+void CodeAssembler::GotoIfNot(Node* condition, Label* false_label) {
   Label true_label(this);
   Branch(condition, &true_label, false_label);
   Bind(&true_label);
 }
 
-void CodeAssembler::Branch(Node* condition, CodeAssembler::Label* true_label,
-                           CodeAssembler::Label* false_label) {
+void CodeAssembler::Branch(Node* condition, Label* true_label,
+                           Label* false_label) {
   true_label->MergeVariables();
   false_label->MergeVariables();
-  return raw_assembler_->Branch(condition, true_label->label_,
-                                false_label->label_);
+  return raw_assembler()->Branch(condition, true_label->label_,
+                                 false_label->label_);
 }
 
 void CodeAssembler::Switch(Node* index, Label* default_label,
@@ -1017,75 +686,68 @@
     case_labels[i]->MergeVariables();
     default_label->MergeVariables();
   }
-  return raw_assembler_->Switch(index, default_label->label_, case_values,
-                                labels, case_count);
-}
-
-Node* CodeAssembler::Select(Node* condition, Node* true_value,
-                            Node* false_value, MachineRepresentation rep) {
-  Variable value(this, rep);
-  Label vtrue(this), vfalse(this), end(this);
-  Branch(condition, &vtrue, &vfalse);
-
-  Bind(&vtrue);
-  {
-    value.Bind(true_value);
-    Goto(&end);
-  }
-  Bind(&vfalse);
-  {
-    value.Bind(false_value);
-    Goto(&end);
-  }
-
-  Bind(&end);
-  return value.value();
+  return raw_assembler()->Switch(index, default_label->label_, case_values,
+                                 labels, case_count);
 }
 
 // RawMachineAssembler delegate helpers:
-Isolate* CodeAssembler::isolate() const { return raw_assembler_->isolate(); }
+Isolate* CodeAssembler::isolate() const { return raw_assembler()->isolate(); }
 
 Factory* CodeAssembler::factory() const { return isolate()->factory(); }
 
-Zone* CodeAssembler::zone() const { return raw_assembler_->zone(); }
+Zone* CodeAssembler::zone() const { return raw_assembler()->zone(); }
+
+RawMachineAssembler* CodeAssembler::raw_assembler() const {
+  return state_->raw_assembler_.get();
+}
 
 // The core implementation of Variable is stored through an indirection so
 // that it can outlive the often block-scoped Variable declarations. This is
 // needed to ensure that variable binding and merging through phis can
 // properly be verified.
-class CodeAssembler::Variable::Impl : public ZoneObject {
+class CodeAssemblerVariable::Impl : public ZoneObject {
  public:
   explicit Impl(MachineRepresentation rep) : value_(nullptr), rep_(rep) {}
   Node* value_;
   MachineRepresentation rep_;
 };
 
-CodeAssembler::Variable::Variable(CodeAssembler* assembler,
-                                  MachineRepresentation rep)
-    : impl_(new (assembler->zone()) Impl(rep)), assembler_(assembler) {
-  assembler->variables_.insert(impl_);
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+                                             MachineRepresentation rep)
+    : impl_(new (assembler->zone()) Impl(rep)), state_(assembler->state()) {
+  state_->variables_.insert(impl_);
 }
 
-CodeAssembler::Variable::~Variable() { assembler_->variables_.erase(impl_); }
+CodeAssemblerVariable::CodeAssemblerVariable(CodeAssembler* assembler,
+                                             MachineRepresentation rep,
+                                             Node* initial_value)
+    : CodeAssemblerVariable(assembler, rep) {
+  Bind(initial_value);
+}
 
-void CodeAssembler::Variable::Bind(Node* value) { impl_->value_ = value; }
+CodeAssemblerVariable::~CodeAssemblerVariable() {
+  state_->variables_.erase(impl_);
+}
 
-Node* CodeAssembler::Variable::value() const {
+void CodeAssemblerVariable::Bind(Node* value) { impl_->value_ = value; }
+
+Node* CodeAssemblerVariable::value() const {
   DCHECK_NOT_NULL(impl_->value_);
   return impl_->value_;
 }
 
-MachineRepresentation CodeAssembler::Variable::rep() const {
-  return impl_->rep_;
-}
+MachineRepresentation CodeAssemblerVariable::rep() const { return impl_->rep_; }
 
-bool CodeAssembler::Variable::IsBound() const {
-  return impl_->value_ != nullptr;
-}
+bool CodeAssemblerVariable::IsBound() const { return impl_->value_ != nullptr; }
 
-CodeAssembler::Label::Label(CodeAssembler* assembler, size_t vars_count,
-                            Variable** vars, CodeAssembler::Label::Type type)
-    : bound_(false), merge_count_(0), assembler_(assembler), label_(nullptr) {
+CodeAssemblerLabel::CodeAssemblerLabel(CodeAssembler* assembler,
+                                       size_t vars_count,
+                                       CodeAssemblerVariable** vars,
+                                       CodeAssemblerLabel::Type type)
+    : bound_(false),
+      merge_count_(0),
+      state_(assembler->state()),
+      label_(nullptr) {
   void* buffer = assembler->zone()->New(sizeof(RawMachineLabel));
   label_ = new (buffer)
       RawMachineLabel(type == kDeferred ? RawMachineLabel::kDeferred
@@ -1095,9 +757,11 @@
   }
 }
 
-void CodeAssembler::Label::MergeVariables() {
+CodeAssemblerLabel::~CodeAssemblerLabel() { label_->~RawMachineLabel(); }
+
+void CodeAssemblerLabel::MergeVariables() {
   ++merge_count_;
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     size_t count = 0;
     Node* node = var->value_;
     if (node != nullptr) {
@@ -1122,7 +786,7 @@
       auto phi = variable_phis_.find(var);
       if (phi != variable_phis_.end()) {
         DCHECK_NOT_NULL(phi->second);
-        assembler_->raw_assembler_->AppendPhiInput(phi->second, node);
+        state_->raw_assembler_->AppendPhiInput(phi->second, node);
       } else {
         auto i = variable_merges_.find(var);
         if (i != variable_merges_.end()) {
@@ -1141,13 +805,13 @@
   }
 }
 
-void CodeAssembler::Label::Bind() {
+void CodeAssemblerLabel::Bind() {
   DCHECK(!bound_);
-  assembler_->raw_assembler_->Bind(label_);
+  state_->raw_assembler_->Bind(label_);
 
   // Make sure that all variables that have changed along any path up to this
   // point are marked as merge variables.
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     Node* shared_value = nullptr;
     auto i = variable_merges_.find(var);
     if (i != variable_merges_.end()) {
@@ -1165,22 +829,23 @@
   }
 
   for (auto var : variable_phis_) {
-    CodeAssembler::Variable::Impl* var_impl = var.first;
+    CodeAssemblerVariable::Impl* var_impl = var.first;
     auto i = variable_merges_.find(var_impl);
-    // If the following assert fires, then a variable that has been marked as
+    // If the following asserts fire, then a variable that has been marked as
     // being merged at the label--either by explicitly marking it so in the
     // label constructor or by having seen different bound values at branches
     // into the label--doesn't have a bound value along all of the paths that
     // have been merged into the label up to this point.
-    DCHECK(i != variable_merges_.end() && i->second.size() == merge_count_);
-    Node* phi = assembler_->raw_assembler_->Phi(
+    DCHECK(i != variable_merges_.end());
+    DCHECK_EQ(i->second.size(), merge_count_);
+    Node* phi = state_->raw_assembler_->Phi(
         var.first->rep_, static_cast<int>(merge_count_), &(i->second[0]));
     variable_phis_[var_impl] = phi;
   }
 
   // Bind all variables to a merge phi, the common value along all paths or
   // null.
-  for (auto var : assembler_->variables_) {
+  for (auto var : state_->variables_) {
     auto i = variable_phis_.find(var);
     if (i != variable_phis_.end()) {
       var->value_ = i->second;
diff --git a/src/compiler/code-assembler.h b/src/compiler/code-assembler.h
index 1f364d9..8808a82 100644
--- a/src/compiler/code-assembler.h
+++ b/src/compiler/code-assembler.h
@@ -12,6 +12,7 @@
 // Do not include anything from src/compiler here!
 #include "src/allocation.h"
 #include "src/builtins/builtins.h"
+#include "src/code-factory.h"
 #include "src/globals.h"
 #include "src/heap/heap.h"
 #include "src/machine-type.h"
@@ -30,10 +31,17 @@
 namespace compiler {
 
 class CallDescriptor;
+class CodeAssemblerLabel;
+class CodeAssemblerVariable;
+class CodeAssemblerState;
 class Node;
 class RawMachineAssembler;
 class RawMachineLabel;
 
+typedef ZoneList<CodeAssemblerVariable*> CodeAssemblerVariableList;
+
+typedef std::function<void()> CodeAssemblerCallback;
+
 #define CODE_ASSEMBLER_COMPARE_BINARY_OP_LIST(V) \
   V(Float32Equal)                                \
   V(Float32LessThan)                             \
@@ -79,9 +87,7 @@
   V(Float64Pow)                            \
   V(Float64InsertLowWord32)                \
   V(Float64InsertHighWord32)               \
-  V(IntPtrAdd)                             \
   V(IntPtrAddWithOverflow)                 \
-  V(IntPtrSub)                             \
   V(IntPtrSubWithOverflow)                 \
   V(IntPtrMul)                             \
   V(Int32Add)                              \
@@ -157,6 +163,7 @@
   V(Float64RoundTiesEven)               \
   V(Float64RoundTruncate)               \
   V(Word32Clz)                          \
+  V(Word32Not)                          \
   V(Word32BinaryNot)
 
 // A "public" interface used by components outside of compiler directory to
@@ -175,22 +182,16 @@
 // clients, CodeAssembler also provides an abstraction for creating variables
 // and enhanced Label functionality to merge variable values along paths where
 // they have differing values, including loops.
+//
+// The CodeAssembler itself is stateless (and instances are expected to be
+// short-lived and scoped to a single use); all of its state is encapsulated
+// in a CodeAssemblerState instance.
 class V8_EXPORT_PRIVATE CodeAssembler {
  public:
-  // Create with CallStub linkage.
-  // |result_size| specifies the number of results returned by the stub.
-  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
-  CodeAssembler(Isolate* isolate, Zone* zone,
-                const CallInterfaceDescriptor& descriptor, Code::Flags flags,
-                const char* name, size_t result_size = 1);
+  explicit CodeAssembler(CodeAssemblerState* state) : state_(state) {}
+  ~CodeAssembler();
 
-  // Create with JSCall linkage.
-  CodeAssembler(Isolate* isolate, Zone* zone, int parameter_count,
-                Code::Flags flags, const char* name);
-
-  virtual ~CodeAssembler();
-
-  Handle<Code> GenerateCode();
+  static Handle<Code> GenerateCode(CodeAssemblerState* state);
 
   bool Is64() const;
   bool IsFloat64RoundUpSupported() const;
@@ -198,24 +199,10 @@
   bool IsFloat64RoundTiesEvenSupported() const;
   bool IsFloat64RoundTruncateSupported() const;
 
-  class Label;
-  class Variable {
-   public:
-    explicit Variable(CodeAssembler* assembler, MachineRepresentation rep);
-    ~Variable();
-    void Bind(Node* value);
-    Node* value() const;
-    MachineRepresentation rep() const;
-    bool IsBound() const;
-
-   private:
-    friend class CodeAssembler;
-    class Impl;
-    Impl* impl_;
-    CodeAssembler* assembler_;
-  };
-
-  typedef ZoneList<Variable*> VariableList;
+  // Shortened aliases for use in CodeAssembler subclasses.
+  typedef CodeAssemblerLabel Label;
+  typedef CodeAssemblerVariable Variable;
+  typedef CodeAssemblerVariableList VariableList;
 
   // ===========================================================================
   // Base Assembler
@@ -229,6 +216,7 @@
   Node* SmiConstant(Smi* value);
   Node* SmiConstant(int value);
   Node* HeapConstant(Handle<HeapObject> object);
+  Node* CStringConstant(const char* str);
   Node* BooleanConstant(bool value);
   Node* ExternalConstant(ExternalReference address);
   Node* Float64Constant(double value);
@@ -240,24 +228,25 @@
   bool ToIntPtrConstant(Node* node, intptr_t& out_value);
 
   Node* Parameter(int value);
+  Node* GetJSContextParameter();
   void Return(Node* value);
+  void Return(Node* value1, Node* value2);
+  void Return(Node* value1, Node* value2, Node* value3);
   void PopAndReturn(Node* pop, Node* value);
 
   void DebugBreak();
+  void Unreachable();
   void Comment(const char* format, ...);
 
   void Bind(Label* label);
   void Goto(Label* label);
   void GotoIf(Node* condition, Label* true_label);
-  void GotoUnless(Node* condition, Label* false_label);
+  void GotoIfNot(Node* condition, Label* false_label);
   void Branch(Node* condition, Label* true_label, Label* false_label);
 
   void Switch(Node* index, Label* default_label, const int32_t* case_values,
               Label** case_labels, size_t case_count);
 
-  Node* Select(Node* condition, Node* true_value, Node* false_value,
-               MachineRepresentation rep = MachineRepresentation::kTagged);
-
   // Access to the frame pointer
   Node* LoadFramePointer();
   Node* LoadParentFramePointer();
@@ -267,19 +256,20 @@
 
   // Load raw memory location.
   Node* Load(MachineType rep, Node* base);
-  Node* Load(MachineType rep, Node* base, Node* index);
-  Node* AtomicLoad(MachineType rep, Node* base, Node* index);
+  Node* Load(MachineType rep, Node* base, Node* offset);
+  Node* AtomicLoad(MachineType rep, Node* base, Node* offset);
 
   // Load a value from the root array.
   Node* LoadRoot(Heap::RootListIndex root_index);
 
   // Store value to raw memory location.
-  Node* Store(MachineRepresentation rep, Node* base, Node* value);
-  Node* Store(MachineRepresentation rep, Node* base, Node* index, Node* value);
+  Node* Store(Node* base, Node* value);
+  Node* Store(Node* base, Node* offset, Node* value);
+  Node* StoreWithMapWriteBarrier(Node* base, Node* offset, Node* value);
   Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* value);
-  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* index,
+  Node* StoreNoWriteBarrier(MachineRepresentation rep, Node* base, Node* offset,
                             Node* value);
-  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+  Node* AtomicStore(MachineRepresentation rep, Node* base, Node* offset,
                     Node* value);
 
   // Store a value to the root array.
@@ -290,6 +280,9 @@
   CODE_ASSEMBLER_BINARY_OP_LIST(DECLARE_CODE_ASSEMBLER_BINARY_OP)
 #undef DECLARE_CODE_ASSEMBLER_BINARY_OP
 
+  Node* IntPtrAdd(Node* left, Node* right);
+  Node* IntPtrSub(Node* left, Node* right);
+
   Node* WordShl(Node* value, int shift);
   Node* WordShr(Node* value, int shift);
   Node* Word32Shr(Node* value, int shift);
@@ -316,149 +309,79 @@
   Node* Projection(int index, Node* value);
 
   // Calls
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4);
-  Node* CallRuntime(Runtime::FunctionId function_id, Node* context, Node* arg1,
-                    Node* arg2, Node* arg3, Node* arg4, Node* arg5);
+  template <class... TArgs>
+  Node* CallRuntime(Runtime::FunctionId function, Node* context, TArgs... args);
 
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                        Node* arg5);
-  Node* TailCallRuntime(Runtime::FunctionId function_id, Node* context,
-                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                        Node* arg5, Node* arg6);
+  template <class... TArgs>
+  Node* TailCallRuntime(Runtime::FunctionId function, Node* context,
+                        TArgs... args);
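
A hypothetical call site (the runtime function is only an example): every
arity that previously needed its own overload now routes through the single
variadic template:

    Node* result = CallRuntime(Runtime::kToName, context, input);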
 
-  // A pair of a zero-based argument index and a value.
-  // It helps writing arguments order independent code.
-  struct Arg {
-    Arg(int index, Node* value) : index(index), value(value) {}
-
-    int const index;
-    Node* const value;
-  };
-
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, Node* arg3, size_t result_size = 1);
-  Node* CallStub(Callable const& callable, Node* context, Node* arg1,
-                 Node* arg2, Node* arg3, Node* arg4, size_t result_size = 1);
-  Node* CallStubN(Callable const& callable, Node** args,
-                  size_t result_size = 1);
-
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, Node* arg1, Node* arg2, Node* arg3, Node* arg4,
-                 Node* arg5, size_t result_size = 1);
-
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, const Arg& arg4, size_t result_size = 1);
-  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                 Node* context, const Arg& arg1, const Arg& arg2,
-                 const Arg& arg3, const Arg& arg4, const Arg& arg5,
-                 size_t result_size = 1);
-
-  Node* CallStubN(const CallInterfaceDescriptor& descriptor,
-                  int js_parameter_count, Node* target, Node** args,
-                  size_t result_size = 1);
-  Node* CallStubN(const CallInterfaceDescriptor& descriptor, Node* target,
-                  Node** args, size_t result_size = 1) {
-    return CallStubN(descriptor, 0, target, args, result_size);
+  template <class... TArgs>
+  Node* CallStub(Callable const& callable, Node* context, TArgs... args) {
+    Node* target = HeapConstant(callable.code());
+    return CallStub(callable.descriptor(), target, context, args...);
   }
 
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, Node* arg4,
-                     size_t result_size = 1);
-  Node* TailCallStub(Callable const& callable, Node* context, Node* arg1,
-                     Node* arg2, Node* arg3, Node* arg4, Node* arg5,
-                     size_t result_size = 1);
+  template <class... TArgs>
+  Node* CallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                 Node* context, TArgs... args) {
+    return CallStubR(descriptor, 1, target, context, args...);
+  }
 
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2,
-                     size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, Node* arg5, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, Node* arg1, Node* arg2, Node* arg3,
-                     Node* arg4, Node* arg5, Node* arg6,
-                     size_t result_size = 1);
+  template <class... TArgs>
+  Node* CallStubR(const CallInterfaceDescriptor& descriptor, size_t result_size,
+                  Node* target, Node* context, TArgs... args);
 
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, const Arg& arg1, const Arg& arg2,
-                     const Arg& arg3, const Arg& arg4, size_t result_size = 1);
-  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
-                     Node* context, const Arg& arg1, const Arg& arg2,
-                     const Arg& arg3, const Arg& arg4, const Arg& arg5,
-                     size_t result_size = 1);
+  Node* CallStubN(const CallInterfaceDescriptor& descriptor, size_t result_size,
+                  int input_count, Node* const* inputs);
 
+  template <class... TArgs>
+  Node* TailCallStub(Callable const& callable, Node* context, TArgs... args) {
+    Node* target = HeapConstant(callable.code());
+    return TailCallStub(callable.descriptor(), target, context, args...);
+  }
+
+  template <class... TArgs>
+  Node* TailCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                     Node* context, TArgs... args);
+
+  template <class... TArgs>
   Node* TailCallBytecodeDispatch(const CallInterfaceDescriptor& descriptor,
-                                 Node* code_target_address, Node** args);
+                                 Node* target, TArgs... args);
 
+  template <class... TArgs>
   Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, Node* arg2, size_t result_size = 1);
-  Node* CallJS(Callable const& callable, Node* context, Node* function,
-               Node* receiver, Node* arg1, Node* arg2, Node* arg3,
-               size_t result_size = 1);
+               Node* receiver, TArgs... args) {
+    int argc = static_cast<int>(sizeof...(args));
+    Node* arity = Int32Constant(argc);
+    return CallStub(callable, context, function, arity, receiver, args...);
+  }
+
+  template <class... TArgs>
+  Node* ConstructJS(Callable const& callable, Node* context, Node* new_target,
+                    TArgs... args) {
+    int argc = static_cast<int>(sizeof...(args));
+    Node* arity = Int32Constant(argc);
+    Node* receiver = LoadRoot(Heap::kUndefinedValueRootIndex);
+
+    // Construct(target, new_target, arity, receiver, arguments...)
+    return CallStub(callable, context, new_target, new_target, arity, receiver,
+                    args...);
+  }
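
A hypothetical use of the JS-call helper (CodeFactory::Call comes from
code-factory.h; the node names are placeholders). With two arguments the
arity node is Int32Constant(2) and the packed call order is function, arity,
receiver, arg0, arg1:

    Node* result = CallJS(CodeFactory::Call(isolate()), context, function,
                          receiver, arg0, arg1);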
+
+  Node* CallCFunctionN(Signature<MachineType>* signature, int input_count,
+                       Node* const* inputs);
 
   // Call to a C function with two arguments.
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, Node* function, Node* arg0,
                        Node* arg1);
 
+  // Call to a C function with three arguments.
+  Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+                       MachineType arg1_type, MachineType arg2_type,
+                       Node* function, Node* arg0, Node* arg1, Node* arg2);
+
   // Exception handling support.
   void GotoIfException(Node* node, Label* if_exception,
                        Variable* exception_var = nullptr);
@@ -468,45 +391,70 @@
   Isolate* isolate() const;
   Zone* zone() const;
 
+  CodeAssemblerState* state() { return state_; }
+
+  void BreakOnNode(int node_id);
+
  protected:
-  // Enables subclasses to perform operations before and after a call.
-  virtual void CallPrologue();
-  virtual void CallEpilogue();
+  void RegisterCallGenerationCallbacks(
+      const CodeAssemblerCallback& call_prologue,
+      const CodeAssemblerCallback& call_epilogue);
+  void UnregisterCallGenerationCallbacks();
 
  private:
-  CodeAssembler(Isolate* isolate, Zone* zone, CallDescriptor* call_descriptor,
-                Code::Flags flags, const char* name);
+  RawMachineAssembler* raw_assembler() const;
 
-  Node* CallN(CallDescriptor* descriptor, Node* code_target, Node** args);
-  Node* TailCallN(CallDescriptor* descriptor, Node* code_target, Node** args);
+  // Calls respective callback registered in the state.
+  void CallPrologue();
+  void CallEpilogue();
 
-  std::unique_ptr<RawMachineAssembler> raw_assembler_;
-  Code::Flags flags_;
-  const char* name_;
-  bool code_generated_;
-  ZoneSet<Variable::Impl*> variables_;
+  CodeAssemblerState* state_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeAssembler);
 };
 
-class CodeAssembler::Label {
+class CodeAssemblerVariable {
+ public:
+  explicit CodeAssemblerVariable(CodeAssembler* assembler,
+                                 MachineRepresentation rep);
+  CodeAssemblerVariable(CodeAssembler* assembler, MachineRepresentation rep,
+                        Node* initial_value);
+  ~CodeAssemblerVariable();
+  void Bind(Node* value);
+  Node* value() const;
+  MachineRepresentation rep() const;
+  bool IsBound() const;
+
+ private:
+  friend class CodeAssemblerLabel;
+  friend class CodeAssemblerState;
+  class Impl;
+  Impl* impl_;
+  CodeAssemblerState* state_;
+};
+
+class CodeAssemblerLabel {
  public:
   enum Type { kDeferred, kNonDeferred };
 
-  explicit Label(
+  explicit CodeAssemblerLabel(
       CodeAssembler* assembler,
-      CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : CodeAssembler::Label(assembler, 0, nullptr, type) {}
-  Label(CodeAssembler* assembler, const VariableList& merged_variables,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : CodeAssembler::Label(assembler, merged_variables.length(),
-                             &(merged_variables[0]), type) {}
-  Label(CodeAssembler* assembler, size_t count, Variable** vars,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred);
-  Label(CodeAssembler* assembler, CodeAssembler::Variable* merged_variable,
-        CodeAssembler::Label::Type type = CodeAssembler::Label::kNonDeferred)
-      : Label(assembler, 1, &merged_variable, type) {}
-  ~Label() {}
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, 0, nullptr, type) {}
+  CodeAssemblerLabel(
+      CodeAssembler* assembler,
+      const CodeAssemblerVariableList& merged_variables,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, merged_variables.length(),
+                           &(merged_variables[0]), type) {}
+  CodeAssemblerLabel(
+      CodeAssembler* assembler, size_t count, CodeAssemblerVariable** vars,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred);
+  CodeAssemblerLabel(
+      CodeAssembler* assembler, CodeAssemblerVariable* merged_variable,
+      CodeAssemblerLabel::Type type = CodeAssemblerLabel::kNonDeferred)
+      : CodeAssemblerLabel(assembler, 1, &merged_variable, type) {}
+  ~CodeAssemblerLabel();
 
  private:
   friend class CodeAssembler;
@@ -516,14 +464,53 @@
 
   bool bound_;
   size_t merge_count_;
-  CodeAssembler* assembler_;
+  CodeAssemblerState* state_;
   RawMachineLabel* label_;
   // Map of variables that need to be merged to their phi nodes (or placeholders
   // for those phis).
-  std::map<Variable::Impl*, Node*> variable_phis_;
+  std::map<CodeAssemblerVariable::Impl*, Node*> variable_phis_;
   // Map of variables to the list of value nodes that have been added from each
   // merge path in their order of merging.
-  std::map<Variable::Impl*, std::vector<Node*>> variable_merges_;
+  std::map<CodeAssemblerVariable::Impl*, std::vector<Node*>> variable_merges_;
+};
+
+class V8_EXPORT_PRIVATE CodeAssemblerState {
+ public:
+  // Create with CallStub linkage.
+  // |result_size| specifies the number of results returned by the stub.
+  // TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
+  CodeAssemblerState(Isolate* isolate, Zone* zone,
+                     const CallInterfaceDescriptor& descriptor,
+                     Code::Flags flags, const char* name,
+                     size_t result_size = 1);
+
+  // Create with JSCall linkage.
+  CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
+                     Code::Flags flags, const char* name);
+
+  ~CodeAssemblerState();
+
+  const char* name() const { return name_; }
+  int parameter_count() const;
+
+ private:
+  friend class CodeAssembler;
+  friend class CodeAssemblerLabel;
+  friend class CodeAssemblerVariable;
+
+  CodeAssemblerState(Isolate* isolate, Zone* zone,
+                     CallDescriptor* call_descriptor, Code::Flags flags,
+                     const char* name);
+
+  std::unique_ptr<RawMachineAssembler> raw_assembler_;
+  Code::Flags flags_;
+  const char* name_;
+  bool code_generated_;
+  ZoneSet<CodeAssemblerVariable::Impl*> variables_;
+  CodeAssemblerCallback call_prologue_;
+  CodeAssemblerCallback call_epilogue_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeAssemblerState);
 };
 
 }  // namespace compiler
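
Taken together, the split makes code generation a two-phase protocol: build a
CodeAssemblerState, run one or more short-lived assemblers over it, then
generate code from the state. A minimal sketch (the zone setup, flags and
stub name are illustrative):

    Zone zone(isolate->allocator(), ZONE_NAME);
    CodeAssemblerState state(isolate, &zone, 0 /* parameter_count */,
                             Code::ComputeFlags(Code::STUB), "Illustration");
    {
      CodeAssembler assembler(&state);
      assembler.Return(assembler.SmiConstant(42));
    }
    Handle<Code> code = CodeAssembler::GenerateCode(&state);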
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
index 8bf3a9e..bdedbec 100644
--- a/src/compiler/code-generator-impl.h
+++ b/src/compiler/code-generator-impl.h
@@ -67,6 +67,14 @@
     return static_cast<int16_t>(InputInt32(index));
   }
 
+  uint8_t InputInt3(size_t index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x7);
+  }
+
+  uint8_t InputInt4(size_t index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0xF);
+  }
+
   uint8_t InputInt5(size_t index) {
     return static_cast<uint8_t>(InputInt32(index) & 0x1F);
   }
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index c69e86e..bbd9452 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -56,6 +56,7 @@
       jump_tables_(nullptr),
       ools_(nullptr),
       osr_pc_offset_(-1),
+      optimized_out_literal_id_(-1),
       source_position_table_builder_(code->zone(),
                                      info->SourcePositionRecordingMode()) {
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
@@ -71,6 +72,7 @@
   frame_access_state_ = new (code()->zone()) FrameAccessState(frame);
 }
 
+
 Handle<Code> CodeGenerator::GenerateCode() {
   CompilationInfo* info = this->info();
 
@@ -79,6 +81,11 @@
   // the frame (that is done in AssemblePrologue).
   FrameScope frame_scope(masm(), StackFrame::MANUAL);
 
+  if (info->is_source_positions_enabled()) {
+    SourcePosition source_position(info->shared_info()->start_position());
+    AssembleSourcePosition(source_position);
+  }
+
   // Place function entry hook if requested to do so.
   if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm());
@@ -189,8 +196,7 @@
   // Assemble all eager deoptimization exits.
   for (DeoptimizationExit* exit : deoptimization_exits_) {
     masm()->bind(exit->label());
-    AssembleDeoptimizerCall(exit->deoptimization_id(), Deoptimizer::EAGER,
-                            exit->pos());
+    AssembleDeoptimizerCall(exit->deoptimization_id(), exit->pos());
   }
 
   // Ensure there is space for lazy deoptimization in the code.
@@ -392,6 +398,10 @@
 CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
     Instruction* instr, const InstructionBlock* block) {
   int first_unused_stack_slot;
+  FlagsMode mode = FlagsModeField::decode(instr->opcode());
+  if (mode != kFlags_trap) {
+    AssembleSourcePosition(instr);
+  }
   bool adjust_stack =
       GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
   if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
@@ -404,12 +414,10 @@
   if (instr->IsJump() && block->must_deconstruct_frame()) {
     AssembleDeconstructFrame();
   }
-  AssembleSourcePosition(instr);
   // Assemble architecture-specific code for the instruction.
   CodeGenResult result = AssembleArchInstruction(instr);
   if (result != kSuccess) return result;
 
-  FlagsMode mode = FlagsModeField::decode(instr->opcode());
   FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
   switch (mode) {
     case kFlags_branch: {
@@ -461,6 +469,10 @@
       AssembleArchBoolean(instr, condition);
       break;
     }
+    case kFlags_trap: {
+      AssembleArchTrap(instr, condition);
+      break;
+    }
     case kFlags_none: {
       break;
     }
@@ -468,10 +480,14 @@
   return kSuccess;
 }
 
-
 void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
   SourcePosition source_position = SourcePosition::Unknown();
+  if (instr->IsNop() && instr->AreMovesRedundant()) return;
   if (!code()->GetSourcePosition(instr, &source_position)) return;
+  AssembleSourcePosition(source_position);
+}
+
+void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
   if (source_position == current_source_position_) return;
   current_source_position_ = source_position;
   if (!source_position.IsKnown()) return;
@@ -481,7 +497,13 @@
     CompilationInfo* info = this->info();
     if (!info->parse_info()) return;
     std::ostringstream buffer;
-    buffer << "-- " << source_position.InliningStack(info) << " --";
+    buffer << "-- ";
+    if (FLAG_trace_turbo) {
+      buffer << source_position;
+    } else {
+      buffer << source_position.InliningStack(info);
+    }
+    buffer << " --";
     masm()->RecordComment(StrDup(buffer.str().c_str()));
   }
 }
@@ -628,15 +650,6 @@
       deopt_state_id = BuildTranslation(instr, -1, frame_state_offset,
                                         OutputFrameStateCombine::Ignore());
     }
-#if DEBUG
-    // Make sure all the values live in stack slots or they are immediates.
-    // (The values should not live in register because registers are clobbered
-    // by calls.)
-    for (size_t i = 0; i < descriptor->GetSize(); i++) {
-      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
-      CHECK(op->IsStackSlot() || op->IsFPStackSlot() || op->IsImmediate());
-    }
-#endif
     safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
   }
 }
@@ -658,6 +671,13 @@
   return code()->GetDeoptimizationEntry(state_id);
 }
 
+DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
+    int deoptimization_id) const {
+  size_t const index = static_cast<size_t>(deoptimization_id);
+  DCHECK_LT(index, deoptimization_states_.size());
+  return deoptimization_states_[index]->kind();
+}
+
 DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
     int deoptimization_id) const {
   size_t const index = static_cast<size_t>(deoptimization_id);
@@ -666,19 +686,41 @@
 }
 
 void CodeGenerator::TranslateStateValueDescriptor(
-    StateValueDescriptor* desc, Translation* translation,
-    InstructionOperandIterator* iter) {
+    StateValueDescriptor* desc, StateValueList* nested,
+    Translation* translation, InstructionOperandIterator* iter) {
+  // Note: if |translation| is null, we just skip the relevant instruction
+  // operands.
   if (desc->IsNested()) {
-    translation->BeginCapturedObject(static_cast<int>(desc->size()));
-    for (size_t index = 0; index < desc->fields().size(); index++) {
-      TranslateStateValueDescriptor(&desc->fields()[index], translation, iter);
+    if (translation != nullptr) {
+      translation->BeginCapturedObject(static_cast<int>(nested->size()));
+    }
+    for (auto field : *nested) {
+      TranslateStateValueDescriptor(field.desc, field.nested, translation,
+                                    iter);
+    }
+  } else if (desc->IsArguments()) {
+    if (translation != nullptr) {
+      translation->BeginArgumentsObject(0);
     }
   } else if (desc->IsDuplicate()) {
-    translation->DuplicateObject(static_cast<int>(desc->id()));
+    if (translation != nullptr) {
+      translation->DuplicateObject(static_cast<int>(desc->id()));
+    }
+  } else if (desc->IsPlain()) {
+    InstructionOperand* op = iter->Advance();
+    if (translation != nullptr) {
+      AddTranslationForOperand(translation, iter->instruction(), op,
+                               desc->type());
+    }
   } else {
-    DCHECK(desc->IsPlain());
-    AddTranslationForOperand(translation, iter->instruction(), iter->Advance(),
-                             desc->type());
+    DCHECK(desc->IsOptimizedOut());
+    if (translation != nullptr) {
+      if (optimized_out_literal_id_ == -1) {
+        optimized_out_literal_id_ =
+            DefineDeoptimizationLiteral(isolate()->factory()->optimized_out());
+      }
+      translation->StoreLiteral(optimized_out_literal_id_);
+    }
   }
 }
 
@@ -686,44 +728,41 @@
 void CodeGenerator::TranslateFrameStateDescriptorOperands(
     FrameStateDescriptor* desc, InstructionOperandIterator* iter,
     OutputFrameStateCombine combine, Translation* translation) {
-  for (size_t index = 0; index < desc->GetSize(combine); index++) {
-    switch (combine.kind()) {
-      case OutputFrameStateCombine::kPushOutput: {
-        DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
-        size_t size_without_output =
-            desc->GetSize(OutputFrameStateCombine::Ignore());
-        // If the index is past the existing stack items in values_.
-        if (index >= size_without_output) {
-          // Materialize the result of the call instruction in this slot.
-          AddTranslationForOperand(
-              translation, iter->instruction(),
-              iter->instruction()->OutputAt(index - size_without_output),
-              MachineType::AnyTagged());
-          continue;
-        }
-        break;
+  size_t index = 0;
+  StateValueList* values = desc->GetStateValueDescriptors();
+  for (StateValueList::iterator it = values->begin(); it != values->end();
+       ++it, ++index) {
+    StateValueDescriptor* value_desc = (*it).desc;
+    if (combine.kind() == OutputFrameStateCombine::kPokeAt) {
+      // The result of the call should be placed at position
+      // [index_from_top] in the stack (overwriting whatever was
+      // previously there).
+      size_t index_from_top =
+          desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
+      if (index >= index_from_top &&
+          index < index_from_top + iter->instruction()->OutputCount()) {
+        DCHECK_NOT_NULL(translation);
+        AddTranslationForOperand(
+            translation, iter->instruction(),
+            iter->instruction()->OutputAt(index - index_from_top),
+            MachineType::AnyTagged());
+        // Skip the instruction operands.
+        TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
+        continue;
       }
-      case OutputFrameStateCombine::kPokeAt:
-        // The result of the call should be placed at position
-        // [index_from_top] in the stack (overwriting whatever was
-        // previously there).
-        size_t index_from_top =
-            desc->GetSize(combine) - 1 - combine.GetOffsetToPokeAt();
-        if (index >= index_from_top &&
-            index < index_from_top + iter->instruction()->OutputCount()) {
-          AddTranslationForOperand(
-              translation, iter->instruction(),
-              iter->instruction()->OutputAt(index - index_from_top),
-              MachineType::AnyTagged());
-          iter->Advance();  // We do not use this input, but we need to
-                            // advace, as the input got replaced.
-          continue;
-        }
-        break;
     }
-    StateValueDescriptor* value_desc = desc->GetStateValueDescriptor();
-    TranslateStateValueDescriptor(&value_desc->fields()[index], translation,
-                                  iter);
+    TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
+  }
+  DCHECK_EQ(desc->GetSize(OutputFrameStateCombine::Ignore()), index);
+
+  if (combine.kind() == OutputFrameStateCombine::kPushOutput) {
+    DCHECK(combine.GetPushCount() <= iter->instruction()->OutputCount());
+    for (size_t output = 0; output < combine.GetPushCount(); output++) {
+      // Materialize the result of the call instruction in this slot.
+      AddTranslationForOperand(translation, iter->instruction(),
+                               iter->instruction()->OutputAt(output),
+                               MachineType::AnyTagged());
+    }
   }
 }
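
The rewritten loop above handles the kPokeAt combine by computing which
state-value slot the call result overwrites. A standalone sketch of that
index arithmetic, with hypothetical sizes (not V8 code):

    #include <cassert>
    #include <cstddef>

    // For a frame state of |size| slots where the call result is poked
    // |offset| slots below the top, the slot to overwrite is:
    size_t IndexFromTop(size_t size, size_t offset) {
      return size - 1 - offset;
    }

    int main() {
      // Hypothetical: 5 state values, poke 1 slot below the top of the
      // stack -> slots are 0..4 and slot 3 receives the call result.
      assert(IndexFromTop(5, 1) == 3);
      return 0;
    }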
 
@@ -768,8 +807,9 @@
       translation->BeginTailCallerFrame(shared_info_id);
       break;
     case FrameStateType::kConstructStub:
+      DCHECK(descriptor->bailout_id().IsValidForConstructStub());
       translation->BeginConstructStubFrame(
-          shared_info_id,
+          descriptor->bailout_id(), shared_info_id,
           static_cast<unsigned int>(descriptor->parameters_count()));
       break;
     case FrameStateType::kGetterStub:
@@ -803,7 +843,7 @@
   int deoptimization_id = static_cast<int>(deoptimization_states_.size());
 
   deoptimization_states_.push_back(new (zone()) DeoptimizationState(
-      descriptor->bailout_id(), translation.index(), pc_offset,
+      descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
       entry.reason()));
 
   return deoptimization_id;
@@ -823,16 +863,15 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
-    } else if (IsAnyTagged(type.representation())) {
-      translation->StoreStackSlot(LocationOperand::cast(op)->index());
     } else {
-      CHECK(false);
+      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+      translation->StoreStackSlot(LocationOperand::cast(op)->index());
     }
   } else if (op->IsFPStackSlot()) {
     if (type.representation() == MachineRepresentation::kFloat64) {
       translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
       translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
     }
   } else if (op->IsRegister()) {
@@ -845,27 +884,26 @@
     } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
                type == MachineType::Uint32()) {
       translation->StoreUint32Register(converter.ToRegister(op));
-    } else if (IsAnyTagged(type.representation())) {
-      translation->StoreRegister(converter.ToRegister(op));
     } else {
-      CHECK(false);
+      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
+      translation->StoreRegister(converter.ToRegister(op));
     }
   } else if (op->IsFPRegister()) {
     InstructionOperandConverter converter(this, instr);
     if (type.representation() == MachineRepresentation::kFloat64) {
       translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
     } else {
-      DCHECK_EQ(MachineRepresentation::kFloat32, type.representation());
+      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
       translation->StoreFloatRegister(converter.ToFloatRegister(op));
     }
-  } else if (op->IsImmediate()) {
+  } else {
+    CHECK(op->IsImmediate());
     InstructionOperandConverter converter(this, instr);
     Constant constant = converter.ToConstant(op);
     Handle<Object> constant_object;
     switch (constant.type()) {
       case Constant::kInt32:
-        if (type.representation() == MachineRepresentation::kTagged ||
-            type.representation() == MachineRepresentation::kTaggedSigned) {
+        if (type.representation() == MachineRepresentation::kTagged) {
           // When pointers are 4 bytes, we can use int32 constants to represent
           // Smis.
           DCHECK_EQ(4, kPointerSize);
@@ -888,9 +926,13 @@
                  type.representation() == MachineRepresentation::kNone);
           DCHECK(type.representation() != MachineRepresentation::kNone ||
                  constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
-
-          constant_object =
-              isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+          if (type == MachineType::Uint32()) {
+            constant_object =
+                isolate()->factory()->NewNumberFromUint(constant.ToInt32());
+          } else {
+            constant_object =
+                isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+          }
         }
         break;
       case Constant::kInt64:
@@ -899,37 +941,28 @@
         // TODO(jarin,bmeurer): We currently pass in raw pointers to the
         // JSFunction::entry here. We should really consider fixing this.
         DCHECK(type.representation() == MachineRepresentation::kWord64 ||
-               type.representation() == MachineRepresentation::kTagged ||
-               type.representation() == MachineRepresentation::kTaggedSigned);
+               type.representation() == MachineRepresentation::kTagged);
         DCHECK_EQ(8, kPointerSize);
         constant_object =
             handle(reinterpret_cast<Smi*>(constant.ToInt64()), isolate());
         DCHECK(constant_object->IsSmi());
         break;
       case Constant::kFloat32:
-        if (type.representation() == MachineRepresentation::kTaggedSigned) {
-          DCHECK(IsSmiDouble(constant.ToFloat32()));
-        } else {
-          DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
-                 CanBeTaggedPointer(type.representation()));
-        }
+        DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
+               type.representation() == MachineRepresentation::kTagged);
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat32());
         break;
       case Constant::kFloat64:
-        if (type.representation() == MachineRepresentation::kTaggedSigned) {
-          DCHECK(IsSmiDouble(constant.ToFloat64()));
-        } else {
-          DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
-                 CanBeTaggedPointer(type.representation()));
-        }
+        DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
+               type.representation() == MachineRepresentation::kTagged);
         constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
-        DCHECK(CanBeTaggedPointer(type.representation()));
+        DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
         constant_object = constant.ToHeapObject();
         break;
       default:
-        CHECK(false);
+        UNREACHABLE();
     }
     if (constant_object.is_identical_to(info()->closure())) {
       translation->StoreJSFrameFunction();
@@ -937,8 +970,6 @@
       int literal_id = DefineDeoptimizationLiteral(constant_object);
       translation->StoreLiteral(literal_id);
     }
-  } else {
-    CHECK(false);
   }
 }
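
One behavioral fix above is the new Uint32 branch when materializing Int32
constants: the same 32-bit pattern denotes different numbers depending on
signedness, so unsigned slots must go through NewNumberFromUint. A standalone
illustration (not V8 code):

    #include <cstdint>
    #include <iostream>

    int main() {
      int32_t bits = -1;  // bit pattern 0xFFFFFFFF
      // Interpreted as signed (the NewNumberFromInt path): -1.
      std::cout << bits << "\n";
      // Interpreted as unsigned (the NewNumberFromUint path): 4294967295.
      std::cout << static_cast<uint32_t>(bits) << "\n";
      return 0;
    }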
 
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index 7aed85a..74958d0 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -12,6 +12,7 @@
 #include "src/macro-assembler.h"
 #include "src/safepoint-table.h"
 #include "src/source-position-table.h"
+#include "src/trap-handler/trap-handler.h"
 
 namespace v8 {
 namespace internal {
@@ -65,6 +66,14 @@
 
   Label* GetLabel(RpoNumber rpo) { return &labels_[rpo.ToSize()]; }
 
+  void AssembleSourcePosition(Instruction* instr);
+
+  void AssembleSourcePosition(SourcePosition source_position);
+
+  // Record a safepoint with the given pointer map.
+  void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode deopt_mode);
+
  private:
   MacroAssembler* masm() { return &masm_; }
   GapResolver* resolver() { return &resolver_; }
@@ -82,10 +91,6 @@
   // assembling code, in which case, a fall-through can be used.
   bool IsNextInAssemblyOrder(RpoNumber block) const;
 
-  // Record a safepoint with the given pointer map.
-  void RecordSafepoint(ReferenceMap* references, Safepoint::Kind kind,
-                       int arguments, Safepoint::DeoptMode deopt_mode);
-
   // Check if a heap object can be materialized by loading from a heap root,
   // which is cheaper on some platforms than materializing the actual heap
   // object constant.
@@ -100,7 +105,6 @@
   // Assemble code for the specified instruction.
   CodeGenResult AssembleInstruction(Instruction* instr,
                                     const InstructionBlock* block);
-  void AssembleSourcePosition(Instruction* instr);
   void AssembleGaps(Instruction* instr);
 
   // Returns true if an instruction is a tail call that needs to adjust the stack
@@ -116,11 +120,11 @@
   void AssembleArchJump(RpoNumber target);
   void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
   void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+  void AssembleArchTrap(Instruction* instr, FlagsCondition condition);
   void AssembleArchLookupSwitch(Instruction* instr);
   void AssembleArchTableSwitch(Instruction* instr);
 
   CodeGenResult AssembleDeoptimizerCall(int deoptimization_id,
-                                        Deoptimizer::BailoutType bailout_type,
                                         SourcePosition pos);
 
   // Generates an architecture-specific, descriptor-specific prologue
@@ -205,6 +209,7 @@
   int DefineDeoptimizationLiteral(Handle<Object> literal);
   DeoptimizationEntry const& GetDeoptimizationEntry(Instruction* instr,
                                                     size_t frame_state_offset);
+  DeoptimizeKind GetDeoptimizationKind(int deoptimization_id) const;
   DeoptimizeReason GetDeoptimizationReason(int deoptimization_id) const;
   int BuildTranslation(Instruction* instr, int pc_offset,
                        size_t frame_state_offset,
@@ -213,6 +218,7 @@
       FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
       Translation* translation, OutputFrameStateCombine state_combine);
   void TranslateStateValueDescriptor(StateValueDescriptor* desc,
+                                     StateValueList* nested,
                                      Translation* translation,
                                      InstructionOperandIterator* iter);
   void TranslateFrameStateDescriptorOperands(FrameStateDescriptor* desc,
@@ -232,21 +238,24 @@
   class DeoptimizationState final : public ZoneObject {
    public:
     DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset,
-                        DeoptimizeReason reason)
+                        DeoptimizeKind kind, DeoptimizeReason reason)
         : bailout_id_(bailout_id),
           translation_id_(translation_id),
           pc_offset_(pc_offset),
+          kind_(kind),
           reason_(reason) {}
 
     BailoutId bailout_id() const { return bailout_id_; }
     int translation_id() const { return translation_id_; }
     int pc_offset() const { return pc_offset_; }
+    DeoptimizeKind kind() const { return kind_; }
     DeoptimizeReason reason() const { return reason_; }
 
    private:
     BailoutId bailout_id_;
     int translation_id_;
     int pc_offset_;
+    DeoptimizeKind kind_;
     DeoptimizeReason reason_;
   };
 
@@ -279,6 +288,7 @@
   JumpTable* jump_tables_;
   OutOfLineCode* ools_;
   int osr_pc_offset_;
+  int optimized_out_literal_id_;
   SourcePositionTableBuilder source_position_table_builder_;
 };
 
diff --git a/src/compiler/common-operator-reducer.cc b/src/compiler/common-operator-reducer.cc
index 9a36816..70fdf71 100644
--- a/src/compiler/common-operator-reducer.cc
+++ b/src/compiler/common-operator-reducer.cc
@@ -36,7 +36,6 @@
 
 }  // namespace
 
-
 CommonOperatorReducer::CommonOperatorReducer(Editor* editor, Graph* graph,
                                              CommonOperatorBuilder* common,
                                              MachineOperatorBuilder* machine)
@@ -44,8 +43,9 @@
       graph_(graph),
       common_(common),
       machine_(machine),
-      dead_(graph->NewNode(common->Dead())) {}
-
+      dead_(graph->NewNode(common->Dead())) {
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 Reduction CommonOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -126,7 +126,7 @@
   DCHECK(node->opcode() == IrOpcode::kDeoptimizeIf ||
          node->opcode() == IrOpcode::kDeoptimizeUnless);
   bool condition_is_true = node->opcode() == IrOpcode::kDeoptimizeUnless;
-  DeoptimizeReason reason = DeoptimizeReasonOf(node->op());
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   Node* condition = NodeProperties::GetValueInput(node, 0);
   Node* frame_state = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -137,9 +137,10 @@
   // (as guaranteed by the graph reduction logic).
   if (condition->opcode() == IrOpcode::kBooleanNot) {
     NodeProperties::ReplaceValueInput(node, condition->InputAt(0), 0);
-    NodeProperties::ChangeOp(node, condition_is_true
-                                       ? common()->DeoptimizeIf(reason)
-                                       : common()->DeoptimizeUnless(reason));
+    NodeProperties::ChangeOp(
+        node, condition_is_true
+                  ? common()->DeoptimizeIf(p.kind(), p.reason())
+                  : common()->DeoptimizeUnless(p.kind(), p.reason()));
     return Changed(node);
   }
   Decision const decision = DecideCondition(condition);
@@ -147,9 +148,8 @@
   if (condition_is_true == (decision == Decision::kTrue)) {
     ReplaceWithValue(node, dead(), effect, control);
   } else {
-    control =
-        graph()->NewNode(common()->Deoptimize(DeoptimizeKind::kEager, reason),
-                         frame_state, effect, control);
+    control = graph()->NewNode(common()->Deoptimize(p.kind(), p.reason()),
+                               frame_state, effect, control);
     // TODO(bmeurer): This should be on the AdvancedReducer somehow.
     NodeProperties::MergeControlToEnd(graph(), common(), control);
     Revisit(graph()->end());
@@ -195,15 +195,16 @@
 
 Reduction CommonOperatorReducer::ReduceEffectPhi(Node* node) {
   DCHECK_EQ(IrOpcode::kEffectPhi, node->opcode());
-  int const input_count = node->InputCount() - 1;
-  DCHECK_LE(1, input_count);
-  Node* const merge = node->InputAt(input_count);
+  Node::Inputs inputs = node->inputs();
+  int const effect_input_count = inputs.count() - 1;
+  DCHECK_LE(1, effect_input_count);
+  Node* const merge = inputs[effect_input_count];
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
-  DCHECK_EQ(input_count, merge->InputCount());
-  Node* const effect = node->InputAt(0);
+  DCHECK_EQ(effect_input_count, merge->InputCount());
+  Node* const effect = inputs[0];
   DCHECK_NE(node, effect);
-  for (int i = 1; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 1; i < effect_input_count; ++i) {
+    Node* const input = inputs[i];
     if (input == node) {
       // Ignore redundant inputs.
       DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -219,16 +220,18 @@
 
 Reduction CommonOperatorReducer::ReducePhi(Node* node) {
   DCHECK_EQ(IrOpcode::kPhi, node->opcode());
-  int const input_count = node->InputCount() - 1;
-  DCHECK_LE(1, input_count);
-  Node* const merge = node->InputAt(input_count);
+  Node::Inputs inputs = node->inputs();
+  int const value_input_count = inputs.count() - 1;
+  DCHECK_LE(1, value_input_count);
+  Node* const merge = inputs[value_input_count];
   DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
-  DCHECK_EQ(input_count, merge->InputCount());
-  if (input_count == 2) {
-    Node* vtrue = node->InputAt(0);
-    Node* vfalse = node->InputAt(1);
-    Node* if_true = merge->InputAt(0);
-    Node* if_false = merge->InputAt(1);
+  DCHECK_EQ(value_input_count, merge->InputCount());
+  if (value_input_count == 2) {
+    Node* vtrue = inputs[0];
+    Node* vfalse = inputs[1];
+    Node::Inputs merge_inputs = merge->inputs();
+    Node* if_true = merge_inputs[0];
+    Node* if_false = merge_inputs[1];
     if (if_true->opcode() != IrOpcode::kIfTrue) {
       std::swap(if_true, if_false);
       std::swap(vtrue, vfalse);
@@ -265,10 +268,10 @@
       }
     }
   }
-  Node* const value = node->InputAt(0);
+  Node* const value = inputs[0];
   DCHECK_NE(node, value);
-  for (int i = 1; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 1; i < value_input_count; ++i) {
+    Node* const input = inputs[i];
     if (input == node) {
       // Ignore redundant inputs.
       DCHECK_EQ(IrOpcode::kLoop, merge->opcode());
@@ -281,49 +284,91 @@
   return Replace(value);
 }
 
-
 Reduction CommonOperatorReducer::ReduceReturn(Node* node) {
   DCHECK_EQ(IrOpcode::kReturn, node->opcode());
-  Node* const value = node->InputAt(1);
   Node* effect = NodeProperties::GetEffectInput(node);
-  Node* const control = NodeProperties::GetControlInput(node);
-  bool changed = false;
   if (effect->opcode() == IrOpcode::kCheckpoint) {
     // Any {Return} node can never be used to insert a deoptimization point,
     // hence checkpoints can be cut out of the effect chain flowing into it.
     effect = NodeProperties::GetEffectInput(effect);
     NodeProperties::ReplaceEffectInput(node, effect);
-    changed = true;
+    Reduction const reduction = ReduceReturn(node);
+    return reduction.Changed() ? reduction : Changed(node);
   }
+  // TODO(ahaas): Extend the reduction below to multiple return values.
+  if (ValueInputCountOfReturn(node->op()) != 1) {
+    return NoChange();
+  }
+  Node* pop_count = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+  Node* control = NodeProperties::GetControlInput(node);
   if (value->opcode() == IrOpcode::kPhi &&
       NodeProperties::GetControlInput(value) == control &&
-      effect->opcode() == IrOpcode::kEffectPhi &&
-      NodeProperties::GetControlInput(effect) == control &&
       control->opcode() == IrOpcode::kMerge) {
-    int const control_input_count = control->InputCount();
-    DCHECK_NE(0, control_input_count);
-    DCHECK_EQ(control_input_count, value->InputCount() - 1);
-    DCHECK_EQ(control_input_count, effect->InputCount() - 1);
+    // This optimization pushes {Return} nodes through merges. It checks that
+    // the return value is actually a {Phi} and the return control dependency
+    // is the {Merge} to which the {Phi} belongs.
+
+    // Value1 ... ValueN Control1 ... ControlN
+    //   ^          ^       ^            ^
+    //   |          |       |            |
+    //   +----+-----+       +------+-----+
+    //        |                    |
+    //       Phi --------------> Merge
+    //        ^                    ^
+    //        |                    |
+    //        |  +-----------------+
+    //        |  |
+    //       Return -----> Effect
+    //         ^
+    //         |
+    //        End
+
+    // Now the effect input to the {Return} node can be either an {EffectPhi}
+    // hanging off the same {Merge}, or the {Merge} node is only connected to
+    // the {Return} and the {Phi}, in which case we know that the effect input
+    // must somehow dominate all merged branches.
+
+    Node::Inputs control_inputs = control->inputs();
+    Node::Inputs value_inputs = value->inputs();
+    DCHECK_NE(0, control_inputs.count());
+    DCHECK_EQ(control_inputs.count(), value_inputs.count() - 1);
     DCHECK_EQ(IrOpcode::kEnd, graph()->end()->opcode());
     DCHECK_NE(0, graph()->end()->InputCount());
-    for (int i = 0; i < control_input_count; ++i) {
-      // Create a new {Return} and connect it to {end}. We don't need to mark
-      // {end} as revisit, because we mark {node} as {Dead} below, which was
-      // previously connected to {end}, so we know for sure that at some point
-      // the reducer logic will visit {end} again.
-      Node* ret = graph()->NewNode(common()->Return(), node->InputAt(0),
-                                   value->InputAt(i), effect->InputAt(i),
-                                   control->InputAt(i));
-      NodeProperties::MergeControlToEnd(graph(), common(), ret);
+    if (control->OwnedBy(node, value)) {
+      for (int i = 0; i < control_inputs.count(); ++i) {
+        // Create a new {Return} and connect it to {end}. We don't need to mark
+        // {end} for revisiting, because we mark {node} as {Dead} below, which
+        // was
+        // previously connected to {end}, so we know for sure that at some point
+        // the reducer logic will visit {end} again.
+        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+                                     effect, control_inputs[i]);
+        NodeProperties::MergeControlToEnd(graph(), common(), ret);
+      }
+      // Mark the Merge {control} and Return {node} as {dead}.
+      Replace(control, dead());
+      return Replace(dead());
+    } else if (effect->opcode() == IrOpcode::kEffectPhi &&
+               NodeProperties::GetControlInput(effect) == control) {
+      Node::Inputs effect_inputs = effect->inputs();
+      DCHECK_EQ(control_inputs.count(), effect_inputs.count() - 1);
+      for (int i = 0; i < control_inputs.count(); ++i) {
+        // Create a new {Return} and connect it to {end}. We don't need to mark
+        // {end} for revisiting, because we mark {node} as {Dead} below, which
+        // was
+        // previously connected to {end}, so we know for sure that at some point
+        // the reducer logic will visit {end} again.
+        Node* ret = graph()->NewNode(node->op(), pop_count, value_inputs[i],
+                                     effect_inputs[i], control_inputs[i]);
+        NodeProperties::MergeControlToEnd(graph(), common(), ret);
+      }
+      // Mark the Merge {control} and Return {node} as {dead}.
+      Replace(control, dead());
+      return Replace(dead());
     }
-    // Mark the merge {control} and return {node} as {dead}.
-    Replace(control, dead());
-    return Replace(dead());
   }
-  return changed ? Changed(node) : NoChange();
+  return NoChange();
 }
 
-
 Reduction CommonOperatorReducer::ReduceSelect(Node* node) {
   DCHECK_EQ(IrOpcode::kSelect, node->opcode());
   Node* const cond = node->InputAt(0);
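
The ReducePhi/ReduceEffectPhi loops in this file implement a collapse rule: a
phi reduces to its first input when every other input is either that same
value or the phi itself (a redundant loop back-edge). A standalone sketch of
the rule (not V8 code; Node is a placeholder type):

    #include <cstddef>
    #include <vector>

    struct Node {};  // stand-in for compiler::Node

    // Returns true if |phi| can be replaced by its first value input.
    bool CollapsesToFirstInput(const Node* phi,
                               const std::vector<const Node*>& values) {
      const Node* first = values[0];
      for (size_t i = 1; i < values.size(); ++i) {
        const Node* input = values[i];
        if (input == phi) continue;  // redundant back-edge input
        if (input != first) return false;
      }
      return true;
    }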
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 9ce6f71..637b064 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -7,9 +7,11 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/zone/zone.h"
 
 namespace v8 {
@@ -35,23 +37,11 @@
   return OpParameter<BranchHint>(op);
 }
 
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const op) {
-  DCHECK(op->opcode() == IrOpcode::kDeoptimizeIf ||
-         op->opcode() == IrOpcode::kDeoptimizeUnless);
-  return OpParameter<DeoptimizeReason>(op);
-}
-
-size_t hash_value(DeoptimizeKind kind) { return static_cast<size_t>(kind); }
-
-std::ostream& operator<<(std::ostream& os, DeoptimizeKind kind) {
-  switch (kind) {
-    case DeoptimizeKind::kEager:
-      return os << "Eager";
-    case DeoptimizeKind::kSoft:
-      return os << "Soft";
-  }
-  UNREACHABLE();
-  return os;
+int ValueInputCountOfReturn(Operator const* const op) {
+  DCHECK(op->opcode() == IrOpcode::kReturn);
+  // Return nodes have a hidden input at index 0 which we ignore in the value
+  // input count.
+  return op->ValueInputCount() - 1;
 }
 
 bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -71,7 +61,9 @@
 }
 
 DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
-  DCHECK_EQ(IrOpcode::kDeoptimize, op->opcode());
+  DCHECK(op->opcode() == IrOpcode::kDeoptimize ||
+         op->opcode() == IrOpcode::kDeoptimizeIf ||
+         op->opcode() == IrOpcode::kDeoptimizeUnless);
   return OpParameter<DeoptimizeParameters>(op);
 }
 
@@ -171,6 +163,106 @@
   return os << p.value() << "|" << p.rmode() << "|" << p.type();
 }
 
+SparseInputMask::InputIterator::InputIterator(
+    SparseInputMask::BitMaskType bit_mask, Node* parent)
+    : bit_mask_(bit_mask), parent_(parent), real_index_(0) {
+#if DEBUG
+  if (bit_mask_ != SparseInputMask::kDenseBitMask) {
+    DCHECK_EQ(base::bits::CountPopulation(bit_mask_) -
+                  base::bits::CountPopulation(kEndMarker),
+              parent->InputCount());
+  }
+#endif
+}
+
+void SparseInputMask::InputIterator::Advance() {
+  DCHECK(!IsEnd());
+
+  if (IsReal()) {
+    ++real_index_;
+  }
+  bit_mask_ >>= 1;
+}
+
+Node* SparseInputMask::InputIterator::GetReal() const {
+  DCHECK(IsReal());
+  return parent_->InputAt(real_index_);
+}
+
+bool SparseInputMask::InputIterator::IsReal() const {
+  return bit_mask_ == SparseInputMask::kDenseBitMask ||
+         (bit_mask_ & kEntryMask);
+}
+
+bool SparseInputMask::InputIterator::IsEnd() const {
+  return (bit_mask_ == kEndMarker) ||
+         (bit_mask_ == SparseInputMask::kDenseBitMask &&
+          real_index_ >= parent_->InputCount());
+}
+
+int SparseInputMask::CountReal() const {
+  DCHECK(!IsDense());
+  return base::bits::CountPopulation(bit_mask_) -
+         base::bits::CountPopulation(kEndMarker);
+}
+
+SparseInputMask::InputIterator SparseInputMask::IterateOverInputs(Node* node) {
+  DCHECK(IsDense() || CountReal() == node->InputCount());
+  return InputIterator(bit_mask_, node);
+}
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+  return lhs.mask() == rhs.mask();
+}
+
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(SparseInputMask const& p) {
+  return base::hash_value(p.mask());
+}
+
+std::ostream& operator<<(std::ostream& os, SparseInputMask const& p) {
+  if (p.IsDense()) {
+    return os << "dense";
+  } else {
+    SparseInputMask::BitMaskType mask = p.mask();
+    DCHECK_NE(mask, SparseInputMask::kDenseBitMask);
+
+    os << "sparse:";
+
+    while (mask != SparseInputMask::kEndMarker) {
+      if (mask & SparseInputMask::kEntryMask) {
+        os << "^";
+      } else {
+        os << ".";
+      }
+      mask >>= 1;
+    }
+    return os;
+  }
+}
+
+bool operator==(TypedStateValueInfo const& lhs,
+                TypedStateValueInfo const& rhs) {
+  return lhs.machine_types() == rhs.machine_types() &&
+         lhs.sparse_input_mask() == rhs.sparse_input_mask();
+}
+
+bool operator!=(TypedStateValueInfo const& lhs,
+                TypedStateValueInfo const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(TypedStateValueInfo const& p) {
+  return base::hash_combine(p.machine_types(), p.sparse_input_mask());
+}
+
+std::ostream& operator<<(std::ostream& os, TypedStateValueInfo const& p) {
+  return os << p.machine_types() << "|" << p.sparse_input_mask();
+}
+
 size_t hash_value(RegionObservability observability) {
   return static_cast<size_t>(observability);
 }
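
The InputIterator above decodes the sparse bitmask one bit at a time: a set
bit consumes the next real input, a clear bit stands for an empty slot, and
iteration stops at the end marker. A standalone model of that decoding with a
hypothetical mask (not V8 code):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      // Hypothetical mask 0b1101, read from the least-significant bit:
      // real, empty, real, then the end marker (top set bit).
      uint32_t mask = 0xD;
      std::vector<int> real_inputs = {10, 20};  // dense node inputs
      size_t real_index = 0;
      while (mask != 0x1) {   // 0x1 is kEndMarker
        if (mask & 0x1) {     // kEntryMask: a real input
          std::cout << real_inputs[real_index++] << "\n";
        } else {
          std::cout << "(empty)\n";
        }
        mask >>= 1;
      }
      return 0;  // prints: 10, (empty), 20
    }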
@@ -235,9 +327,23 @@
   return OpParameter<OsrGuardType>(op);
 }
 
+SparseInputMask SparseInputMaskOf(Operator const* op) {
+  DCHECK(op->opcode() == IrOpcode::kStateValues ||
+         op->opcode() == IrOpcode::kTypedStateValues);
+
+  if (op->opcode() == IrOpcode::kTypedStateValues) {
+    return OpParameter<TypedStateValueInfo>(op).sparse_input_mask();
+  }
+  return OpParameter<SparseInputMask>(op);
+}
+
 ZoneVector<MachineType> const* MachineTypesOf(Operator const* op) {
   DCHECK(op->opcode() == IrOpcode::kTypedObjectState ||
          op->opcode() == IrOpcode::kTypedStateValues);
+
+  if (op->opcode() == IrOpcode::kTypedStateValues) {
+    return OpParameter<TypedStateValueInfo>(op).machine_types();
+  }
   return OpParameter<const ZoneVector<MachineType>*>(op);
 }
 
@@ -313,22 +419,37 @@
   V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
 
 #define CACHED_DEOPTIMIZE_IF_LIST(V) \
-  V(DivisionByZero)                  \
-  V(Hole)                            \
-  V(MinusZero)                       \
-  V(Overflow)                        \
-  V(Smi)
+  V(Eager, DivisionByZero)           \
+  V(Eager, Hole)                     \
+  V(Eager, MinusZero)                \
+  V(Eager, Overflow)                 \
+  V(Eager, Smi)
 
 #define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
-  V(LostPrecision)                       \
-  V(LostPrecisionOrNaN)                  \
-  V(NoReason)                            \
-  V(NotAHeapNumber)                      \
-  V(NotANumberOrOddball)                 \
-  V(NotASmi)                             \
-  V(OutOfBounds)                         \
-  V(WrongInstanceType)                   \
-  V(WrongMap)
+  V(Eager, LostPrecision)                \
+  V(Eager, LostPrecisionOrNaN)           \
+  V(Eager, NoReason)                     \
+  V(Eager, NotAHeapNumber)               \
+  V(Eager, NotANumberOrOddball)          \
+  V(Eager, NotASmi)                      \
+  V(Eager, OutOfBounds)                  \
+  V(Eager, WrongInstanceType)            \
+  V(Eager, WrongMap)
+
+#define CACHED_TRAP_IF_LIST(V) \
+  V(TrapDivUnrepresentable)    \
+  V(TrapFloatUnrepresentable)
+
+// The reason for a trap.
+#define CACHED_TRAP_UNLESS_LIST(V) \
+  V(TrapUnreachable)               \
+  V(TrapMemOutOfBounds)            \
+  V(TrapDivByZero)                 \
+  V(TrapDivUnrepresentable)        \
+  V(TrapRemByZero)                 \
+  V(TrapFloatUnrepresentable)      \
+  V(TrapFuncInvalid)               \
+  V(TrapFuncSigMismatch)
 
 #define CACHED_PARAMETER_LIST(V) \
   V(0)                           \
@@ -497,38 +618,72 @@
   CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
 #undef CACHED_DEOPTIMIZE
 
-  template <DeoptimizeReason kReason>
-  struct DeoptimizeIfOperator final : public Operator1<DeoptimizeReason> {
+  template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+  struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
     DeoptimizeIfOperator()
-        : Operator1<DeoptimizeReason>(                   // --
+        : Operator1<DeoptimizeParameters>(               // --
               IrOpcode::kDeoptimizeIf,                   // opcode
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeIf",                            // name
               2, 1, 1, 0, 1, 1,                          // counts
-              kReason) {}                                // parameter
+              DeoptimizeParameters(kKind, kReason)) {}   // parameter
   };
-#define CACHED_DEOPTIMIZE_IF(Reason)                \
-  DeoptimizeIfOperator<DeoptimizeReason::k##Reason> \
-      kDeoptimizeIf##Reason##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason)                                   \
+  DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+      kDeoptimizeIf##Kind##Reason##Operator;
   CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
 #undef CACHED_DEOPTIMIZE_IF
 
-  template <DeoptimizeReason kReason>
-  struct DeoptimizeUnlessOperator final : public Operator1<DeoptimizeReason> {
+  template <DeoptimizeKind kKind, DeoptimizeReason kReason>
+  struct DeoptimizeUnlessOperator final
+      : public Operator1<DeoptimizeParameters> {
     DeoptimizeUnlessOperator()
-        : Operator1<DeoptimizeReason>(                   // --
+        : Operator1<DeoptimizeParameters>(               // --
               IrOpcode::kDeoptimizeUnless,               // opcode
               Operator::kFoldable | Operator::kNoThrow,  // properties
               "DeoptimizeUnless",                        // name
               2, 1, 1, 0, 1, 1,                          // counts
-              kReason) {}                                // parameter
+              DeoptimizeParameters(kKind, kReason)) {}   // parameter
   };
-#define CACHED_DEOPTIMIZE_UNLESS(Reason)                \
-  DeoptimizeUnlessOperator<DeoptimizeReason::k##Reason> \
-      kDeoptimizeUnless##Reason##Operator;
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason)          \
+  DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind,     \
+                           DeoptimizeReason::k##Reason> \
+      kDeoptimizeUnless##Kind##Reason##Operator;
   CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
 #undef CACHED_DEOPTIMIZE_UNLESS
 
+  template <int32_t trap_id>
+  struct TrapIfOperator final : public Operator1<int32_t> {
+    TrapIfOperator()
+        : Operator1<int32_t>(                            // --
+              IrOpcode::kTrapIf,                         // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "TrapIf",                                  // name
+              1, 1, 1, 0, 0, 1,                          // counts
+              trap_id) {}                                // parameter
+  };
+#define CACHED_TRAP_IF(Trap)                                       \
+  TrapIfOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
+      kTrapIf##Trap##Operator;
+  CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
+
+  template <int32_t trap_id>
+  struct TrapUnlessOperator final : public Operator1<int32_t> {
+    TrapUnlessOperator()
+        : Operator1<int32_t>(                            // --
+              IrOpcode::kTrapUnless,                     // opcode
+              Operator::kFoldable | Operator::kNoThrow,  // properties
+              "TrapUnless",                              // name
+              1, 1, 1, 0, 0, 1,                          // counts
+              trap_id) {}                                // parameter
+  };
+#define CACHED_TRAP_UNLESS(Trap)                                       \
+  TrapUnlessOperator<static_cast<int32_t>(Builtins::kThrowWasm##Trap)> \
+      kTrapUnless##Trap##Operator;
+  CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+
   template <MachineRepresentation kRep, int kInputCount>
   struct PhiOperator final : public Operator1<MachineRepresentation> {
     PhiOperator()
@@ -588,13 +743,14 @@
 #undef CACHED_PROJECTION
 
   template <int kInputCount>
-  struct StateValuesOperator final : public Operator {
+  struct StateValuesOperator final : public Operator1<SparseInputMask> {
     StateValuesOperator()
-        : Operator(                           // --
-              IrOpcode::kStateValues,         // opcode
-              Operator::kPure,                // flags
-              "StateValues",                  // name
-              kInputCount, 0, 0, 1, 0, 0) {}  // counts
+        : Operator1<SparseInputMask>(       // --
+              IrOpcode::kStateValues,       // opcode
+              Operator::kPure,              // flags
+              "StateValues",                // name
+              kInputCount, 0, 0, 1, 0, 0,   // counts
+              SparseInputMask::Dense()) {}  // parameter
   };
 #define CACHED_STATE_VALUES(input_count) \
   StateValuesOperator<input_count> kStateValues##input_count##Operator;
@@ -688,45 +844,81 @@
       parameter);                                       // parameter
 }
 
-const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeReason reason) {
-  switch (reason) {
-#define CACHED_DEOPTIMIZE_IF(Reason) \
-  case DeoptimizeReason::k##Reason:  \
-    return &cache_.kDeoptimizeIf##Reason##Operator;
-    CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
-#undef CACHED_DEOPTIMIZE_IF
-    default:
-      break;
+const Operator* CommonOperatorBuilder::DeoptimizeIf(DeoptimizeKind kind,
+                                                    DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason)                \
+  if (kind == DeoptimizeKind::k##Kind &&                  \
+      reason == DeoptimizeReason::k##Reason) {            \
+    return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
   }
+  CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
+#undef CACHED_DEOPTIMIZE_IF
   // Uncached
-  return new (zone()) Operator1<DeoptimizeReason>(  // --
-      IrOpcode::kDeoptimizeIf,                      // opcode
-      Operator::kFoldable | Operator::kNoThrow,     // properties
-      "DeoptimizeIf",                               // name
-      2, 1, 1, 0, 1, 1,                             // counts
-      reason);                                      // parameter
+  DeoptimizeParameters parameter(kind, reason);
+  return new (zone()) Operator1<DeoptimizeParameters>(  // --
+      IrOpcode::kDeoptimizeIf,                          // opcode
+      Operator::kFoldable | Operator::kNoThrow,         // properties
+      "DeoptimizeIf",                                   // name
+      2, 1, 1, 0, 1, 1,                                 // counts
+      parameter);                                       // parameter
 }
 
 const Operator* CommonOperatorBuilder::DeoptimizeUnless(
-    DeoptimizeReason reason) {
-  switch (reason) {
-#define CACHED_DEOPTIMIZE_UNLESS(Reason) \
-  case DeoptimizeReason::k##Reason:      \
-    return &cache_.kDeoptimizeUnless##Reason##Operator;
-    CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
+    DeoptimizeKind kind, DeoptimizeReason reason) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason)                \
+  if (kind == DeoptimizeKind::k##Kind &&                      \
+      reason == DeoptimizeReason::k##Reason) {                \
+    return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
+  }
+  CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
 #undef CACHED_DEOPTIMIZE_UNLESS
+  // Uncached
+  DeoptimizeParameters parameter(kind, reason);
+  return new (zone()) Operator1<DeoptimizeParameters>(  // --
+      IrOpcode::kDeoptimizeUnless,                      // opcode
+      Operator::kFoldable | Operator::kNoThrow,         // properties
+      "DeoptimizeUnless",                               // name
+      2, 1, 1, 0, 1, 1,                                 // counts
+      parameter);                                       // parameter
+}
+
+const Operator* CommonOperatorBuilder::TrapIf(int32_t trap_id) {
+  switch (trap_id) {
+#define CACHED_TRAP_IF(Trap)       \
+  case Builtins::kThrowWasm##Trap: \
+    return &cache_.kTrapIf##Trap##Operator;
+    CACHED_TRAP_IF_LIST(CACHED_TRAP_IF)
+#undef CACHED_TRAP_IF
     default:
       break;
   }
   // Uncached
-  return new (zone()) Operator1<DeoptimizeReason>(  // --
-      IrOpcode::kDeoptimizeUnless,                  // opcode
-      Operator::kFoldable | Operator::kNoThrow,     // properties
-      "DeoptimizeUnless",                           // name
-      2, 1, 1, 0, 1, 1,                             // counts
-      reason);                                      // parameter
+  return new (zone()) Operator1<int>(            // --
+      IrOpcode::kTrapIf,                         // opcode
+      Operator::kFoldable | Operator::kNoThrow,  // properties
+      "TrapIf",                                  // name
+      1, 1, 1, 0, 0, 1,                          // counts
+      trap_id);                                  // parameter
 }
 
+const Operator* CommonOperatorBuilder::TrapUnless(int32_t trap_id) {
+  switch (trap_id) {
+#define CACHED_TRAP_UNLESS(Trap)   \
+  case Builtins::kThrowWasm##Trap: \
+    return &cache_.kTrapUnless##Trap##Operator;
+    CACHED_TRAP_UNLESS_LIST(CACHED_TRAP_UNLESS)
+#undef CACHED_TRAP_UNLESS
+    default:
+      break;
+  }
+  // Uncached
+  return new (zone()) Operator1<int>(            // --
+      IrOpcode::kTrapUnless,                     // opcode
+      Operator::kFoldable | Operator::kNoThrow,  // properties
+      "TrapUnless",                              // name
+      1, 1, 1, 0, 0, 1,                          // counts
+      trap_id);                                  // parameter
+}
 
 const Operator* CommonOperatorBuilder::Switch(size_t control_output_count) {
   return new (zone()) Operator(               // --
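
TrapIf/TrapUnless above follow the same caching scheme as the deoptimize
operators: common parameter values are served from preallocated singletons in
the global cache, anything else falls back to an allocation. A standalone
sketch of that lookup shape (not V8 code; the ids are illustrative, and in V8
the fallback allocation lives in a zone):

    #include <cstdint>

    struct Operator {
      int32_t trap_id;
    };

    struct OperatorCache {
      Operator trap_div_by_zero{1};  // preallocated singleton
    };

    const Operator* GetTrapIf(OperatorCache& cache, int32_t trap_id) {
      if (trap_id == 1) return &cache.trap_div_by_zero;  // cached hit
      return new Operator{trap_id};  // uncached fallback
    }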
@@ -1000,30 +1192,51 @@
   return nullptr;
 }
 
-const Operator* CommonOperatorBuilder::StateValues(int arguments) {
-  switch (arguments) {
+const Operator* CommonOperatorBuilder::StateValues(int arguments,
+                                                   SparseInputMask bitmask) {
+  if (bitmask.IsDense()) {
+    switch (arguments) {
 #define CACHED_STATE_VALUES(arguments) \
   case arguments:                      \
     return &cache_.kStateValues##arguments##Operator;
-    CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
+      CACHED_STATE_VALUES_LIST(CACHED_STATE_VALUES)
 #undef CACHED_STATE_VALUES
-    default:
-      break;
+      default:
+        break;
+    }
   }
+
+#if DEBUG
+  DCHECK(bitmask.IsDense() || bitmask.CountReal() == arguments);
+#endif
+
   // Uncached.
-  return new (zone()) Operator(                 // --
-      IrOpcode::kStateValues, Operator::kPure,  // opcode
-      "StateValues",                            // name
-      arguments, 0, 0, 1, 0, 0);                // counts
+  return new (zone()) Operator1<SparseInputMask>(  // --
+      IrOpcode::kStateValues, Operator::kPure,     // opcode
+      "StateValues",                               // name
+      arguments, 0, 0, 1, 0, 0,                    // counts
+      bitmask);                                    // parameter
 }
 
 const Operator* CommonOperatorBuilder::TypedStateValues(
-    const ZoneVector<MachineType>* types) {
-  return new (zone()) Operator1<const ZoneVector<MachineType>*>(  // --
-      IrOpcode::kTypedStateValues, Operator::kPure,               // opcode
-      "TypedStateValues",                                         // name
-      static_cast<int>(types->size()), 0, 0, 1, 0, 0,             // counts
-      types);                                                     // parameter
+    const ZoneVector<MachineType>* types, SparseInputMask bitmask) {
+#if DEBUG
+  DCHECK(bitmask.IsDense() ||
+         bitmask.CountReal() == static_cast<int>(types->size()));
+#endif
+
+  return new (zone()) Operator1<TypedStateValueInfo>(  // --
+      IrOpcode::kTypedStateValues, Operator::kPure,    // opcode
+      "TypedStateValues",                              // name
+      static_cast<int>(types->size()), 0, 0, 1, 0, 0,  // counts
+      TypedStateValueInfo(types, bitmask));            // parameters
+}
+
+const Operator* CommonOperatorBuilder::ArgumentsObjectState() {
+  return new (zone()) Operator(                          // --
+      IrOpcode::kArgumentsObjectState, Operator::kPure,  // opcode
+      "ArgumentsObjectState",                            // name
+      0, 0, 0, 1, 0, 0);                                 // counts
 }
 
 const Operator* CommonOperatorBuilder::ObjectState(int pointer_slots) {
@@ -1131,7 +1344,6 @@
   }
 }
 
-
 const FrameStateFunctionInfo*
 CommonOperatorBuilder::CreateFrameStateFunctionInfo(
     FrameStateType type, int parameter_count, int local_count,
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 1f258a0..4682959 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -22,6 +22,7 @@
 struct CommonOperatorGlobalCache;
 class Operator;
 class Type;
+class Node;
 
 // Prediction hint for branches.
 enum class BranchHint : uint8_t { kNone, kTrue, kFalse };
@@ -45,15 +46,8 @@
 
 V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const);
 
-// Deoptimize reason for Deoptimize, DeoptimizeIf and DeoptimizeUnless.
-DeoptimizeReason DeoptimizeReasonOf(Operator const* const);
-
-// Deoptimize bailout kind.
-enum class DeoptimizeKind : uint8_t { kEager, kSoft };
-
-size_t hash_value(DeoptimizeKind kind);
-
-std::ostream& operator<<(std::ostream&, DeoptimizeKind);
+// Helper function for return nodes, because returns have a hidden value input.
+int ValueInputCountOfReturn(Operator const* const op);
 
 // Parameters for the {Deoptimize} operator.
 class DeoptimizeParameters final {
@@ -158,6 +152,123 @@
 
 size_t hash_value(RelocatablePtrConstantInfo const& p);
 
+// Used to define a sparse set of inputs. This can efficiently encode nodes
+// that have a large number of inputs, many of which share the same value.
+class SparseInputMask final {
+ public:
+  typedef uint32_t BitMaskType;
+
+  // The mask representing a dense input set.
+  static const BitMaskType kDenseBitMask = 0x0;
+  // The bits representing the end of a sparse input set.
+  static const BitMaskType kEndMarker = 0x1;
+  // The mask for accessing a sparse input entry in the bitmask.
+  static const BitMaskType kEntryMask = 0x1;
+
+  // The number of bits in the mask, minus one for the end marker.
+  static const int kMaxSparseInputs = (sizeof(BitMaskType) * kBitsPerByte - 1);
+
+  // An iterator over a node's sparse inputs.
+  class InputIterator final {
+   public:
+    InputIterator() {}
+    InputIterator(BitMaskType bit_mask, Node* parent);
+
+    Node* parent() const { return parent_; }
+    int real_index() const { return real_index_; }
+
+    // Advance the iterator to the next sparse input. Only valid if the iterator
+    // has not reached the end.
+    void Advance();
+
+    // Get the current sparse input's real node value. Only valid if the
+    // current sparse input is real.
+    Node* GetReal() const;
+
+    // Get the current sparse input, returning either a real input node if
+    // the current sparse input is real, or the given {empty_value} if the
+    // current sparse input is empty.
+    Node* Get(Node* empty_value) const {
+      return IsReal() ? GetReal() : empty_value;
+    }
+
+    // True if the current sparse input is a real input node.
+    bool IsReal() const;
+
+    // True if the current sparse input is an empty value.
+    bool IsEmpty() const { return !IsReal(); }
+
+    // True if the iterator has reached the end of the sparse inputs.
+    bool IsEnd() const;
+
+   private:
+    BitMaskType bit_mask_;
+    Node* parent_;
+    int real_index_;
+  };
+
+  explicit SparseInputMask(BitMaskType bit_mask) : bit_mask_(bit_mask) {}
+
+  // Provides a SparseInputMask representing a dense input set.
+  static SparseInputMask Dense() { return SparseInputMask(kDenseBitMask); }
+
+  BitMaskType mask() const { return bit_mask_; }
+
+  bool IsDense() const { return bit_mask_ == SparseInputMask::kDenseBitMask; }
+
+  // Counts how many real values are in the sparse array. Only valid for
+  // non-dense masks.
+  int CountReal() const;
+
+  // Returns an iterator over the sparse inputs of {node}.
+  InputIterator IterateOverInputs(Node* node);
+
+ private:
+  //
+  // The sparse input mask has a bitmask specifying if the node's inputs are
+  // represented sparsely. If the bitmask value is 0, then the inputs are dense;
+  // otherwise, they should be interpreted as follows:
+  //
+  //   * The bitmask represents which values are real, with 1 for real values
+  //     and 0 for empty values.
+  //   * The inputs to the node are the real values, in the order of the 1s from
+  //     least- to most-significant.
+  //   * The top bit of the bitmask is a guard indicating the end of the values,
+  //     whether real or empty (and is not representative of a real input
+  //     itself). This is used so that we don't have to additionally store a
+  //     value count.
+  //
+  // So, for N 1s in the bitmask, there are N - 1 inputs into the node.
+  BitMaskType bit_mask_;
+};
+
+bool operator==(SparseInputMask const& lhs, SparseInputMask const& rhs);
+bool operator!=(SparseInputMask const& lhs, SparseInputMask const& rhs);
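
To make the encoding comment above concrete, here is a worked example with
hypothetical values: the sparse input sequence {a, empty, empty, b} is
encoded as mask 0b11001 (bit 0 marks a, bits 1-2 the empty slots, bit 3 marks
b, bit 4 is the end guard), and the node itself stores only {a, b} densely:

    #include <cstdint>

    static const uint32_t kExampleMask = 0x19;  // 0b11001
    static_assert((kExampleMask & 0x1) == 0x1, "slot 0 (a) is real");
    static_assert(((kExampleMask >> 1) & 0x1) == 0x0, "slot 1 is empty");
    static_assert(((kExampleMask >> 2) & 0x1) == 0x0, "slot 2 is empty");
    static_assert(((kExampleMask >> 3) & 0x1) == 0x1, "slot 3 (b) is real");
    static_assert((kExampleMask >> 4) == 0x1, "bit 4 is the end marker");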
+
+class TypedStateValueInfo final {
+ public:
+  TypedStateValueInfo(ZoneVector<MachineType> const* machine_types,
+                      SparseInputMask sparse_input_mask)
+      : machine_types_(machine_types), sparse_input_mask_(sparse_input_mask) {}
+
+  ZoneVector<MachineType> const* machine_types() const {
+    return machine_types_;
+  }
+  SparseInputMask sparse_input_mask() const { return sparse_input_mask_; }
+
+ private:
+  ZoneVector<MachineType> const* machine_types_;
+  SparseInputMask sparse_input_mask_;
+};
+
+bool operator==(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+bool operator!=(TypedStateValueInfo const& lhs, TypedStateValueInfo const& rhs);
+
+std::ostream& operator<<(std::ostream&, TypedStateValueInfo const&);
+
+size_t hash_value(TypedStateValueInfo const& p);
+
 // Used to mark a region (as identified by BeginRegion/FinishRegion) as either
 // JavaScript-observable or not (i.e. allocations are not JavaScript observable
 // themselves, but transitioning stores are).
@@ -181,6 +292,8 @@
 std::ostream& operator<<(std::ostream&, OsrGuardType);
 OsrGuardType OsrGuardTypeOf(Operator const*);
 
+SparseInputMask SparseInputMaskOf(Operator const*);
+
 ZoneVector<MachineType> const* MachineTypesOf(Operator const*)
     WARN_UNUSED_RESULT;
 
@@ -203,8 +316,11 @@
   const Operator* IfDefault();
   const Operator* Throw();
   const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason);
-  const Operator* DeoptimizeIf(DeoptimizeReason reason);
-  const Operator* DeoptimizeUnless(DeoptimizeReason reason);
+  const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason);
+  const Operator* DeoptimizeUnless(DeoptimizeKind kind,
+                                   DeoptimizeReason reason);
+  const Operator* TrapIf(int32_t trap_id);
+  const Operator* TrapUnless(int32_t trap_id);
   const Operator* Return(int value_input_count = 1);
   const Operator* Terminate();
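
Hypothetical usage of the revised builder interface above, assuming a
CommonOperatorBuilder accessible as common(): DeoptimizeIf and
DeoptimizeUnless now take an explicit DeoptimizeKind alongside the reason,
and wasm traps get dedicated TrapIf/TrapUnless operators keyed by the trap
builtin's id:

    const Operator* deopt_if = common()->DeoptimizeIf(
        DeoptimizeKind::kEager, DeoptimizeReason::kHole);
    const Operator* trap_unless = common()->TrapUnless(
        static_cast<int32_t>(Builtins::kThrowWasmTrapDivByZero));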
 
@@ -243,8 +359,10 @@
   const Operator* Checkpoint();
   const Operator* BeginRegion(RegionObservability);
   const Operator* FinishRegion();
-  const Operator* StateValues(int arguments);
-  const Operator* TypedStateValues(const ZoneVector<MachineType>* types);
+  const Operator* StateValues(int arguments, SparseInputMask bitmask);
+  const Operator* TypedStateValues(const ZoneVector<MachineType>* types,
+                                   SparseInputMask bitmask);
+  const Operator* ArgumentsObjectState();
   const Operator* ObjectState(int pointer_slots);
   const Operator* TypedObjectState(const ZoneVector<MachineType>* types);
   const Operator* FrameState(BailoutId bailout_id,
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
index b159bb2..a0b3ebd 100644
--- a/src/compiler/control-builders.cc
+++ b/src/compiler/control-builders.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/control-builders.h"
 
+#include "src/objects-inl.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -180,65 +182,6 @@
   set_environment(break_environment_);
 }
 
-
-void TryCatchBuilder::BeginTry() {
-  exit_environment_ = environment()->CopyAsUnreachable();
-  catch_environment_ = environment()->CopyAsUnreachable();
-  catch_environment_->Push(the_hole());
-}
-
-
-void TryCatchBuilder::Throw(Node* exception) {
-  environment()->Push(exception);
-  catch_environment_->Merge(environment());
-  environment()->Pop();
-  environment()->MarkAsUnreachable();
-}
-
-
-void TryCatchBuilder::EndTry() {
-  exit_environment_->Merge(environment());
-  exception_node_ = catch_environment_->Pop();
-  set_environment(catch_environment_);
-}
-
-
-void TryCatchBuilder::EndCatch() {
-  exit_environment_->Merge(environment());
-  set_environment(exit_environment_);
-}
-
-
-void TryFinallyBuilder::BeginTry() {
-  finally_environment_ = environment()->CopyAsUnreachable();
-  finally_environment_->Push(the_hole());
-  finally_environment_->Push(the_hole());
-}
-
-
-void TryFinallyBuilder::LeaveTry(Node* token, Node* value) {
-  environment()->Push(value);
-  environment()->Push(token);
-  finally_environment_->Merge(environment());
-  environment()->Drop(2);
-}
-
-
-void TryFinallyBuilder::EndTry(Node* fallthrough_token, Node* value) {
-  environment()->Push(value);
-  environment()->Push(fallthrough_token);
-  finally_environment_->Merge(environment());
-  environment()->Drop(2);
-  token_node_ = finally_environment_->Pop();
-  value_node_ = finally_environment_->Pop();
-  set_environment(finally_environment_);
-}
-
-
-void TryFinallyBuilder::EndFinally() {
-  // Nothing to be done here.
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
index a59dcb6..88efd27 100644
--- a/src/compiler/control-builders.h
+++ b/src/compiler/control-builders.h
@@ -145,59 +145,6 @@
   Environment* break_environment_;  // Environment after the block exits.
 };
 
-
-// Tracks control flow for a try-catch statement.
-class TryCatchBuilder final : public ControlBuilder {
- public:
-  explicit TryCatchBuilder(AstGraphBuilder* builder)
-      : ControlBuilder(builder),
-        catch_environment_(nullptr),
-        exit_environment_(nullptr),
-        exception_node_(nullptr) {}
-
-  // Primitive control commands.
-  void BeginTry();
-  void Throw(Node* exception);
-  void EndTry();
-  void EndCatch();
-
-  // Returns the exception value inside the 'catch' body.
-  Node* GetExceptionNode() const { return exception_node_; }
-
- private:
-  Environment* catch_environment_;  // Environment for the 'catch' body.
-  Environment* exit_environment_;   // Environment after the statement.
-  Node* exception_node_;            // Node for exception in 'catch' body.
-};
-
-
-// Tracks control flow for a try-finally statement.
-class TryFinallyBuilder final : public ControlBuilder {
- public:
-  explicit TryFinallyBuilder(AstGraphBuilder* builder)
-      : ControlBuilder(builder),
-        finally_environment_(nullptr),
-        token_node_(nullptr),
-        value_node_(nullptr) {}
-
-  // Primitive control commands.
-  void BeginTry();
-  void LeaveTry(Node* token, Node* value);
-  void EndTry(Node* token, Node* value);
-  void EndFinally();
-
-  // Returns the dispatch token value inside the 'finally' body.
-  Node* GetDispatchTokenNode() const { return token_node_; }
-
-  // Returns the saved result value inside the 'finally' body.
-  Node* GetResultValueNode() const { return value_node_; }
-
- private:
-  Environment* finally_environment_;  // Environment for the 'finally' body.
-  Node* token_node_;                  // Node for token in 'finally' body.
-  Node* value_node_;                  // Node for value in 'finally' body.
-};
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/control-equivalence.h b/src/compiler/control-equivalence.h
index 05777d7..b76e04f 100644
--- a/src/compiler/control-equivalence.h
+++ b/src/compiler/control-equivalence.h
@@ -124,7 +124,11 @@
   void DetermineParticipation(Node* exit);
 
  private:
-  NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
+  NodeData* GetData(Node* node) {
+    size_t const index = node->id();
+    if (index >= node_data_.size()) node_data_.resize(index + 1, EmptyData());
+    return &node_data_[index];
+  }
   int NewClassNumber() { return class_number_++; }
   int NewDFSNumber() { return dfs_number_++; }
 
diff --git a/src/compiler/dead-code-elimination.cc b/src/compiler/dead-code-elimination.cc
index 81bf299..d66a9c5 100644
--- a/src/compiler/dead-code-elimination.cc
+++ b/src/compiler/dead-code-elimination.cc
@@ -18,8 +18,9 @@
     : AdvancedReducer(editor),
       graph_(graph),
       common_(common),
-      dead_(graph->NewNode(common->Dead())) {}
-
+      dead_(graph->NewNode(common->Dead())) {
+  NodeProperties::SetType(dead_, Type::None());
+}
 
 Reduction DeadCodeElimination::Reduce(Node* node) {
   switch (node->opcode()) {
@@ -40,11 +41,11 @@
 
 Reduction DeadCodeElimination::ReduceEnd(Node* node) {
   DCHECK_EQ(IrOpcode::kEnd, node->opcode());
-  int const input_count = node->InputCount();
-  DCHECK_LE(1, input_count);
+  Node::Inputs inputs = node->inputs();
+  DCHECK_LE(1, inputs.count());
   int live_input_count = 0;
-  for (int i = 0; i < input_count; ++i) {
-    Node* const input = node->InputAt(i);
+  for (int i = 0; i < inputs.count(); ++i) {
+    Node* const input = inputs[i];
     // Skip dead inputs.
     if (input->opcode() == IrOpcode::kDead) continue;
     // Compact live inputs.
@@ -53,20 +54,20 @@
   }
   if (live_input_count == 0) {
     return Replace(dead());
-  } else if (live_input_count < input_count) {
+  } else if (live_input_count < inputs.count()) {
     node->TrimInputCount(live_input_count);
     NodeProperties::ChangeOp(node, common()->End(live_input_count));
     return Changed(node);
   }
-  DCHECK_EQ(input_count, live_input_count);
+  DCHECK_EQ(inputs.count(), live_input_count);
   return NoChange();
 }
 
 
 Reduction DeadCodeElimination::ReduceLoopOrMerge(Node* node) {
   DCHECK(IrOpcode::IsMergeOpcode(node->opcode()));
-  int const input_count = node->InputCount();
-  DCHECK_LE(1, input_count);
+  Node::Inputs inputs = node->inputs();
+  DCHECK_LE(1, inputs.count());
   // Count the number of live inputs to {node} and compact them on the fly, also
   // compacting the inputs of the associated {Phi} and {EffectPhi} uses at the
   // same time.  We consider {Loop}s dead even if only the first control input
@@ -74,8 +75,8 @@
   int live_input_count = 0;
   if (node->opcode() != IrOpcode::kLoop ||
       node->InputAt(0)->opcode() != IrOpcode::kDead) {
-    for (int i = 0; i < input_count; ++i) {
-      Node* const input = node->InputAt(i);
+    for (int i = 0; i < inputs.count(); ++i) {
+      Node* const input = inputs[i];
       // Skip dead inputs.
       if (input->opcode() == IrOpcode::kDead) continue;
       // Compact live inputs.
@@ -83,7 +84,7 @@
         node->ReplaceInput(live_input_count, input);
         for (Node* const use : node->uses()) {
           if (NodeProperties::IsPhi(use)) {
-            DCHECK_EQ(input_count + 1, use->InputCount());
+            DCHECK_EQ(inputs.count() + 1, use->InputCount());
             use->ReplaceInput(live_input_count, use->InputAt(i));
           }
         }
@@ -109,9 +110,9 @@
     return Replace(node->InputAt(0));
   }
   DCHECK_LE(2, live_input_count);
-  DCHECK_LE(live_input_count, input_count);
+  DCHECK_LE(live_input_count, inputs.count());
   // Trim input count for the {Merge} or {Loop} node.
-  if (live_input_count < input_count) {
+  if (live_input_count < inputs.count()) {
     // Trim input counts for all phi uses and revisit them.
     for (Node* const use : node->uses()) {
       if (NodeProperties::IsPhi(use)) {
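// A self-contained sketch of the compaction scheme ReduceEnd and
// ReduceLoopOrMerge use: live inputs are shifted left over dead ones in one
// pass, and the parallel input lists of any phi uses are compacted with the
// same index, so input i of the merge keeps lining up with input i of each
// phi. Plain ints stand in for Node* inputs here; the real phis carry one
// extra trailing control input, omitted for brevity.
#include <cassert>
#include <cstddef>
#include <vector>

constexpr int kDead = -1;  // Stand-in for an IrOpcode::kDead input.

size_t CompactLiveInputs(std::vector<int>* merge, std::vector<int>* phi) {
  size_t live = 0;
  for (size_t i = 0; i < merge->size(); ++i) {
    if ((*merge)[i] == kDead) continue;  // Skip dead inputs.
    (*merge)[live] = (*merge)[i];        // Compact live inputs...
    (*phi)[live] = (*phi)[i];            // ...and mirror into the phi.
    ++live;
  }
  merge->resize(live);                   // Like Node::TrimInputCount().
  phi->resize(live);
  return live;
}

int main() {
  std::vector<int> merge = {10, kDead, 30};
  std::vector<int> phi = {1, 2, 3};
  size_t live = CompactLiveInputs(&merge, &phi);
  assert(live == 2 && merge[1] == 30 && phi[1] == 3);
}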
diff --git a/src/compiler/effect-control-linearizer.cc b/src/compiler/effect-control-linearizer.cc
index d4b0576..865e909 100644
--- a/src/compiler/effect-control-linearizer.cc
+++ b/src/compiler/effect-control-linearizer.cc
@@ -13,6 +13,7 @@
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
 #include "src/compiler/schedule.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -24,7 +25,8 @@
     : js_graph_(js_graph),
       schedule_(schedule),
       temp_zone_(temp_zone),
-      source_positions_(source_positions) {}
+      source_positions_(source_positions),
+      graph_assembler_(js_graph, nullptr, nullptr, temp_zone) {}
 
 Graph* EffectControlLinearizer::graph() const { return js_graph_->graph(); }
 CommonOperatorBuilder* EffectControlLinearizer::common() const {
@@ -596,829 +598,727 @@
                                                    Node* frame_state,
                                                    Node** effect,
                                                    Node** control) {
-  ValueEffectControl state(nullptr, nullptr, nullptr);
+  gasm()->Reset(*effect, *control);
+  Node* result = nullptr;
   switch (node->opcode()) {
     case IrOpcode::kChangeBitToTagged:
-      state = LowerChangeBitToTagged(node, *effect, *control);
+      result = LowerChangeBitToTagged(node);
       break;
     case IrOpcode::kChangeInt31ToTaggedSigned:
-      state = LowerChangeInt31ToTaggedSigned(node, *effect, *control);
+      result = LowerChangeInt31ToTaggedSigned(node);
       break;
     case IrOpcode::kChangeInt32ToTagged:
-      state = LowerChangeInt32ToTagged(node, *effect, *control);
+      result = LowerChangeInt32ToTagged(node);
       break;
     case IrOpcode::kChangeUint32ToTagged:
-      state = LowerChangeUint32ToTagged(node, *effect, *control);
+      result = LowerChangeUint32ToTagged(node);
       break;
     case IrOpcode::kChangeFloat64ToTagged:
-      state = LowerChangeFloat64ToTagged(node, *effect, *control);
+      result = LowerChangeFloat64ToTagged(node);
       break;
     case IrOpcode::kChangeFloat64ToTaggedPointer:
-      state = LowerChangeFloat64ToTaggedPointer(node, *effect, *control);
+      result = LowerChangeFloat64ToTaggedPointer(node);
       break;
     case IrOpcode::kChangeTaggedSignedToInt32:
-      state = LowerChangeTaggedSignedToInt32(node, *effect, *control);
+      result = LowerChangeTaggedSignedToInt32(node);
       break;
     case IrOpcode::kChangeTaggedToBit:
-      state = LowerChangeTaggedToBit(node, *effect, *control);
+      result = LowerChangeTaggedToBit(node);
       break;
     case IrOpcode::kChangeTaggedToInt32:
-      state = LowerChangeTaggedToInt32(node, *effect, *control);
+      result = LowerChangeTaggedToInt32(node);
       break;
     case IrOpcode::kChangeTaggedToUint32:
-      state = LowerChangeTaggedToUint32(node, *effect, *control);
+      result = LowerChangeTaggedToUint32(node);
       break;
     case IrOpcode::kChangeTaggedToFloat64:
-      state = LowerChangeTaggedToFloat64(node, *effect, *control);
+      result = LowerChangeTaggedToFloat64(node);
+      break;
+    case IrOpcode::kChangeTaggedToTaggedSigned:
+      result = LowerChangeTaggedToTaggedSigned(node);
       break;
     case IrOpcode::kTruncateTaggedToBit:
-      state = LowerTruncateTaggedToBit(node, *effect, *control);
+      result = LowerTruncateTaggedToBit(node);
       break;
     case IrOpcode::kTruncateTaggedToFloat64:
-      state = LowerTruncateTaggedToFloat64(node, *effect, *control);
+      result = LowerTruncateTaggedToFloat64(node);
       break;
     case IrOpcode::kCheckBounds:
-      state = LowerCheckBounds(node, frame_state, *effect, *control);
+      result = LowerCheckBounds(node, frame_state);
       break;
     case IrOpcode::kCheckMaps:
-      state = LowerCheckMaps(node, frame_state, *effect, *control);
+      result = LowerCheckMaps(node, frame_state);
       break;
     case IrOpcode::kCheckNumber:
-      state = LowerCheckNumber(node, frame_state, *effect, *control);
+      result = LowerCheckNumber(node, frame_state);
+      break;
+    case IrOpcode::kCheckReceiver:
+      result = LowerCheckReceiver(node, frame_state);
       break;
     case IrOpcode::kCheckString:
-      state = LowerCheckString(node, frame_state, *effect, *control);
+      result = LowerCheckString(node, frame_state);
+      break;
+    case IrOpcode::kCheckInternalizedString:
+      result = LowerCheckInternalizedString(node, frame_state);
       break;
     case IrOpcode::kCheckIf:
-      state = LowerCheckIf(node, frame_state, *effect, *control);
+      result = LowerCheckIf(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Add:
-      state = LowerCheckedInt32Add(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Add(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Sub:
-      state = LowerCheckedInt32Sub(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Sub(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Div:
-      state = LowerCheckedInt32Div(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Div(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Mod:
-      state = LowerCheckedInt32Mod(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Mod(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32Div:
-      state = LowerCheckedUint32Div(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32Div(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32Mod:
-      state = LowerCheckedUint32Mod(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32Mod(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32Mul:
-      state = LowerCheckedInt32Mul(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32Mul(node, frame_state);
       break;
     case IrOpcode::kCheckedInt32ToTaggedSigned:
-      state =
-          LowerCheckedInt32ToTaggedSigned(node, frame_state, *effect, *control);
+      result = LowerCheckedInt32ToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32ToInt32:
-      state = LowerCheckedUint32ToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedUint32ToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedUint32ToTaggedSigned:
-      state = LowerCheckedUint32ToTaggedSigned(node, frame_state, *effect,
-                                               *control);
+      result = LowerCheckedUint32ToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedFloat64ToInt32:
-      state = LowerCheckedFloat64ToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedFloat64ToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedSignedToInt32:
-      state =
-          LowerCheckedTaggedSignedToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedSignedToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToInt32:
-      state = LowerCheckedTaggedToInt32(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedToInt32(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToFloat64:
-      state = LowerCheckedTaggedToFloat64(node, frame_state, *effect, *control);
+      result = LowerCheckedTaggedToFloat64(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToTaggedSigned:
-      state = LowerCheckedTaggedToTaggedSigned(node, frame_state, *effect,
-                                               *control);
+      result = LowerCheckedTaggedToTaggedSigned(node, frame_state);
       break;
     case IrOpcode::kCheckedTaggedToTaggedPointer:
-      state = LowerCheckedTaggedToTaggedPointer(node, frame_state, *effect,
-                                                *control);
+      result = LowerCheckedTaggedToTaggedPointer(node, frame_state);
       break;
     case IrOpcode::kTruncateTaggedToWord32:
-      state = LowerTruncateTaggedToWord32(node, *effect, *control);
+      result = LowerTruncateTaggedToWord32(node);
       break;
     case IrOpcode::kCheckedTruncateTaggedToWord32:
-      state = LowerCheckedTruncateTaggedToWord32(node, frame_state, *effect,
-                                                 *control);
+      result = LowerCheckedTruncateTaggedToWord32(node, frame_state);
       break;
-    case IrOpcode::kObjectIsCallable:
-      state = LowerObjectIsCallable(node, *effect, *control);
+    case IrOpcode::kObjectIsDetectableCallable:
+      result = LowerObjectIsDetectableCallable(node);
+      break;
+    case IrOpcode::kObjectIsNonCallable:
+      result = LowerObjectIsNonCallable(node);
       break;
     case IrOpcode::kObjectIsNumber:
-      state = LowerObjectIsNumber(node, *effect, *control);
+      result = LowerObjectIsNumber(node);
       break;
     case IrOpcode::kObjectIsReceiver:
-      state = LowerObjectIsReceiver(node, *effect, *control);
+      result = LowerObjectIsReceiver(node);
       break;
     case IrOpcode::kObjectIsSmi:
-      state = LowerObjectIsSmi(node, *effect, *control);
+      result = LowerObjectIsSmi(node);
       break;
     case IrOpcode::kObjectIsString:
-      state = LowerObjectIsString(node, *effect, *control);
+      result = LowerObjectIsString(node);
       break;
     case IrOpcode::kObjectIsUndetectable:
-      state = LowerObjectIsUndetectable(node, *effect, *control);
+      result = LowerObjectIsUndetectable(node);
+      break;
+    case IrOpcode::kNewRestParameterElements:
+      result = LowerNewRestParameterElements(node);
+      break;
+    case IrOpcode::kNewUnmappedArgumentsElements:
+      result = LowerNewUnmappedArgumentsElements(node);
       break;
     case IrOpcode::kArrayBufferWasNeutered:
-      state = LowerArrayBufferWasNeutered(node, *effect, *control);
+      result = LowerArrayBufferWasNeutered(node);
       break;
     case IrOpcode::kStringFromCharCode:
-      state = LowerStringFromCharCode(node, *effect, *control);
+      result = LowerStringFromCharCode(node);
       break;
     case IrOpcode::kStringFromCodePoint:
-      state = LowerStringFromCodePoint(node, *effect, *control);
+      result = LowerStringFromCodePoint(node);
+      break;
+    case IrOpcode::kStringIndexOf:
+      result = LowerStringIndexOf(node);
+      break;
+    case IrOpcode::kStringCharAt:
+      result = LowerStringCharAt(node);
       break;
     case IrOpcode::kStringCharCodeAt:
-      state = LowerStringCharCodeAt(node, *effect, *control);
+      result = LowerStringCharCodeAt(node);
       break;
     case IrOpcode::kStringEqual:
-      state = LowerStringEqual(node, *effect, *control);
+      result = LowerStringEqual(node);
       break;
     case IrOpcode::kStringLessThan:
-      state = LowerStringLessThan(node, *effect, *control);
+      result = LowerStringLessThan(node);
       break;
     case IrOpcode::kStringLessThanOrEqual:
-      state = LowerStringLessThanOrEqual(node, *effect, *control);
+      result = LowerStringLessThanOrEqual(node);
       break;
     case IrOpcode::kCheckFloat64Hole:
-      state = LowerCheckFloat64Hole(node, frame_state, *effect, *control);
+      result = LowerCheckFloat64Hole(node, frame_state);
       break;
     case IrOpcode::kCheckTaggedHole:
-      state = LowerCheckTaggedHole(node, frame_state, *effect, *control);
+      result = LowerCheckTaggedHole(node, frame_state);
       break;
     case IrOpcode::kConvertTaggedHoleToUndefined:
-      state = LowerConvertTaggedHoleToUndefined(node, *effect, *control);
+      result = LowerConvertTaggedHoleToUndefined(node);
       break;
     case IrOpcode::kPlainPrimitiveToNumber:
-      state = LowerPlainPrimitiveToNumber(node, *effect, *control);
+      result = LowerPlainPrimitiveToNumber(node);
       break;
     case IrOpcode::kPlainPrimitiveToWord32:
-      state = LowerPlainPrimitiveToWord32(node, *effect, *control);
+      result = LowerPlainPrimitiveToWord32(node);
       break;
     case IrOpcode::kPlainPrimitiveToFloat64:
-      state = LowerPlainPrimitiveToFloat64(node, *effect, *control);
+      result = LowerPlainPrimitiveToFloat64(node);
       break;
     case IrOpcode::kEnsureWritableFastElements:
-      state = LowerEnsureWritableFastElements(node, *effect, *control);
+      result = LowerEnsureWritableFastElements(node);
       break;
     case IrOpcode::kMaybeGrowFastElements:
-      state = LowerMaybeGrowFastElements(node, frame_state, *effect, *control);
+      result = LowerMaybeGrowFastElements(node, frame_state);
       break;
     case IrOpcode::kTransitionElementsKind:
-      state = LowerTransitionElementsKind(node, *effect, *control);
+      LowerTransitionElementsKind(node);
       break;
     case IrOpcode::kLoadTypedElement:
-      state = LowerLoadTypedElement(node, *effect, *control);
+      result = LowerLoadTypedElement(node);
       break;
     case IrOpcode::kStoreTypedElement:
-      state = LowerStoreTypedElement(node, *effect, *control);
+      LowerStoreTypedElement(node);
       break;
     case IrOpcode::kFloat64RoundUp:
-      state = LowerFloat64RoundUp(node, *effect, *control);
+      if (!LowerFloat64RoundUp(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundDown:
-      state = LowerFloat64RoundDown(node, *effect, *control);
+      if (!LowerFloat64RoundDown(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundTruncate:
-      state = LowerFloat64RoundTruncate(node, *effect, *control);
+      if (!LowerFloat64RoundTruncate(node).To(&result)) {
+        return false;
+      }
       break;
     case IrOpcode::kFloat64RoundTiesEven:
-      state = LowerFloat64RoundTiesEven(node, *effect, *control);
+      if (!LowerFloat64RoundTiesEven(node).To(&result)) {
+        return false;
+      }
       break;
     default:
       return false;
   }
-  NodeProperties::ReplaceUses(node, state.value, state.effect, state.control);
-  *effect = state.effect;
-  *control = state.control;
+  *effect = gasm()->ExtractCurrentEffect();
+  *control = gasm()->ExtractCurrentControl();
+  NodeProperties::ReplaceUses(node, result, *effect, *control);
   return true;
 }
 
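// A toy model (hypothetical Assembler class, not V8's GraphAssembler API) of
// the new protocol above: the assembler owns the current effect/control
// chain, Reset() seeds it from the incoming state, each emitted operation
// threads the chain implicitly, and the caller extracts the final state once
// instead of passing Node** effect / Node** control through every Lower*
// helper.
#include <iostream>
#include <string>
#include <utility>

class Assembler {
 public:
  void Reset(std::string effect, std::string control) {
    effect_ = std::move(effect);
    control_ = std::move(control);
  }
  // An effectful op extends the current effect chain as a side effect.
  std::string LoadField(const std::string& object) {
    effect_ = "Load(" + object + "; " + effect_ + ")";
    return "value_of_" + object;
  }
  std::string ExtractCurrentEffect() const { return effect_; }
  std::string ExtractCurrentControl() const { return control_; }

 private:
  std::string effect_;
  std::string control_;
};

int main() {
  Assembler gasm;
  std::string effect = "e0", control = "c0";
  gasm.Reset(effect, control);           // Like gasm()->Reset(*effect, *control).
  gasm.LoadField("obj");                 // Helper body: no effect/control params.
  effect = gasm.ExtractCurrentEffect();  // Final state read back once.
  control = gasm.ExtractCurrentControl();
  std::cout << effect << ", " << control << "\n";  // Load(obj; e0), c0
}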
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node, Node* effect,
-                                                    Node* control) {
+#define __ gasm()->
+
+Node* EffectControlLinearizer::LowerChangeFloat64ToTagged(Node* node) {
   Node* value = node->InputAt(0);
-  return AllocateHeapNumberWithValue(value, effect, control);
+  return AllocateHeapNumberWithValue(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerChangeFloat64ToTaggedPointer(Node* node) {
   Node* value = node->InputAt(0);
-  return AllocateHeapNumberWithValue(value, effect, control);
+  return AllocateHeapNumberWithValue(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeBitToTagged(Node* node, Node* effect,
-                                                Node* control) {
+Node* EffectControlLinearizer::LowerChangeBitToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* branch = graph()->NewNode(common()->Branch(), value, control);
+  auto if_true = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->TrueConstant();
+  __ GotoIf(value, &if_true);
+  __ Goto(&done, __ FalseConstant());
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = jsgraph()->FalseConstant();
+  __ Bind(&if_true);
+  __ Goto(&done, __ TrueConstant());
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
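// The label-based form above builds the same two-way diamond the removed
// code assembled by hand (Branch / IfTrue / IfFalse / Merge / Phi). As a
// plain-C++ analogue, assuming a "bit" is 0 or 1 and Tagged::kTrue/kFalse
// stand in for the oddball constants, the whole lowering is one select:
#include <cassert>

enum class Tagged { kFalse, kTrue };

Tagged ChangeBitToTagged(int bit) {
  // GotoIf(value, &if_true) + Goto(&done, FalseConstant()) collapse to:
  return bit ? Tagged::kTrue : Tagged::kFalse;  // == done.PhiAt(0)
}

int main() {
  assert(ChangeBitToTagged(1) == Tagged::kTrue);
  assert(ChangeBitToTagged(0) == Tagged::kFalse);
}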
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node,
-                                                        Node* effect,
-                                                        Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt31ToTaggedSigned(Node* node) {
   Node* value = node->InputAt(0);
-  value = ChangeInt32ToSmi(value);
-  return ValueEffectControl(value, effect, control);
+  return ChangeInt32ToSmi(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerChangeInt32ToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
   if (machine()->Is64()) {
-    return ValueEffectControl(ChangeInt32ToSmi(value), effect, control);
+    return ChangeInt32ToSmi(value);
   }
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
-                               control);
+  auto if_overflow = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* ovf = graph()->NewNode(common()->Projection(1), add, control);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), ovf, control);
+  Node* add = __ Int32AddWithOverflow(value, value);
+  Node* ovf = __ Projection(1, add);
+  __ GotoIf(ovf, &if_overflow);
+  __ Goto(&done, __ Projection(0, add));
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  ValueEffectControl alloc =
-      AllocateHeapNumberWithValue(ChangeInt32ToFloat64(value), effect, if_true);
+  __ Bind(&if_overflow);
+  Node* number = AllocateHeapNumberWithValue(__ ChangeInt32ToFloat64(value));
+  __ Goto(&done, number);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = graph()->NewNode(common()->Projection(0), add, if_false);
-
-  Node* merge = graph()->NewNode(common()->Merge(2), alloc.control, if_false);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               alloc.value, vfalse, merge);
-  Node* ephi =
-      graph()->NewNode(common()->EffectPhi(2), alloc.effect, effect, merge);
-
-  return ValueEffectControl(phi, ephi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
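// Why Int32AddWithOverflow(value, value): on 32-bit targets a Smi stores the
// integer shifted left by one, so value + value computes the tag, and the
// overflow bit doubles as the "does not fit in a Smi" check that routes to
// the deferred HeapNumber allocation. A standalone model of the 32-bit
// scheme only (on 64-bit, ChangeInt32ToSmi shifts by 32 and cannot
// overflow, hence the early return above):
#include <cstdint>

// Returns true and stores the tagged value if |value| fits in a 32-bit Smi.
bool TryTagSmi32(int32_t value, int32_t* smi_out) {
  int64_t doubled = int64_t{value} + value;        // value << 1, widened
  int32_t truncated = static_cast<int32_t>(doubled);
  if (doubled != truncated) return false;          // Overflow: needs HeapNumber.
  *smi_out = truncated;
  return true;
}

int main() {
  int32_t smi = 0;
  // 2^30 doubled is 2^31, which no longer fits: the deferred path is taken.
  return TryTagSmi32(0x40000000, &smi) ? 1 : 0;
}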
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node, Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerChangeUint32ToTagged(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
-                                 SmiMaxValueConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_in_smi_range = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = ChangeUint32ToSmi(value);
+  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+  __ GotoUnless(check, &if_not_in_smi_range);
+  __ Goto(&done, ChangeUint32ToSmi(value));
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl alloc = AllocateHeapNumberWithValue(
-      ChangeUint32ToFloat64(value), effect, if_false);
+  __ Bind(&if_not_in_smi_range);
+  Node* number = AllocateHeapNumberWithValue(__ ChangeUint32ToFloat64(value));
 
-  Node* merge = graph()->NewNode(common()->Merge(2), if_true, alloc.control);
-  Node* phi = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                               vtrue, alloc.value, merge);
-  Node* ephi =
-      graph()->NewNode(common()->EffectPhi(2), effect, alloc.effect, merge);
+  __ Goto(&done, number);
+  __ Bind(&done);
 
-  return ValueEffectControl(phi, ephi, merge);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node,
-                                                        Node* effect,
-                                                        Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedSignedToInt32(Node* node) {
   Node* value = node->InputAt(0);
-  value = ChangeSmiToInt32(value);
-  return ValueEffectControl(value, effect, control);
+  return ChangeSmiToInt32(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToBit(Node* node, Node* effect,
-                                                Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToBit(Node* node) {
   Node* value = node->InputAt(0);
-  value = graph()->NewNode(machine()->WordEqual(), value,
-                           jsgraph()->TrueConstant());
-  return ValueEffectControl(value, effect, control);
+  return __ WordEqual(value, __ TrueConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToBit(Node* node) {
   Node* value = node->InputAt(0);
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* fzero = jsgraph()->Float64Constant(0.0);
 
-  // Collect effect/control/value triples.
-  int count = 0;
-  Node* values[6];
-  Node* effects[6];
-  Node* controls[5];
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto if_heapnumber = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<6>(MachineRepresentation::kBit);
+
+  Node* zero = __ Int32Constant(0);
+  Node* fzero = __ Float64Constant(0.0);
+
+  // Check if {value} is false.
+  __ GotoIf(__ WordEqual(value, __ FalseConstant()), &done, zero);
 
   // Check if {value} is a Smi.
   Node* check_smi = ObjectIsSmi(value);
-  Node* branch_smi = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                      check_smi, control);
+  __ GotoIf(check_smi, &if_smi);
 
-  // If {value} is a Smi, then we only need to check that it's not zero.
-  Node* if_smi = graph()->NewNode(common()->IfTrue(), branch_smi);
-  Node* esmi = effect;
-  {
-    controls[count] = if_smi;
-    effects[count] = esmi;
-    values[count] =
-        graph()->NewNode(machine()->Word32Equal(),
-                         graph()->NewNode(machine()->WordEqual(), value,
-                                          jsgraph()->IntPtrConstant(0)),
-                         zero);
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_smi);
+  // Check if {value} is the empty string.
+  __ GotoIf(__ WordEqual(value, __ EmptyStringConstant()), &done, zero);
 
-  // Load the map instance type of {value}.
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-  Node* value_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-      effect, control);
+  // Load the map of {value}.
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
 
-  // Check if {value} is an Oddball.
-  Node* check_oddball =
-      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
-                       jsgraph()->Int32Constant(ODDBALL_TYPE));
-  Node* branch_oddball = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                          check_oddball, control);
-
-  // The only Oddball {value} that is trueish is true itself.
-  Node* if_oddball = graph()->NewNode(common()->IfTrue(), branch_oddball);
-  Node* eoddball = effect;
-  {
-    controls[count] = if_oddball;
-    effects[count] = eoddball;
-    values[count] = graph()->NewNode(machine()->WordEqual(), value,
-                                     jsgraph()->TrueConstant());
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_oddball);
-
-  // Check if {value} is a String.
-  Node* check_string =
-      graph()->NewNode(machine()->Int32LessThan(), value_instance_type,
-                       jsgraph()->Int32Constant(FIRST_NONSTRING_TYPE));
-  Node* branch_string =
-      graph()->NewNode(common()->Branch(), check_string, control);
-
-  // For String {value}, we need to check that the length is not zero.
-  Node* if_string = graph()->NewNode(common()->IfTrue(), branch_string);
-  Node* estring = effect;
-  {
-    // Load the {value} length.
-    Node* value_length = estring = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForStringLength()), value,
-        estring, if_string);
-
-    controls[count] = if_string;
-    effects[count] = estring;
-    values[count] =
-        graph()->NewNode(machine()->Word32Equal(),
-                         graph()->NewNode(machine()->WordEqual(), value_length,
-                                          jsgraph()->IntPtrConstant(0)),
-                         zero);
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_string);
+  // Check if the {value} is undetectable and immediately return false.
+  Node* value_map_bitfield =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
+  __ GotoUnless(
+      __ Word32Equal(__ Word32And(value_map_bitfield,
+                                  __ Int32Constant(1 << Map::kIsUndetectable)),
+                     zero),
+      &done, zero);
 
   // Check if {value} is a HeapNumber.
-  Node* check_heapnumber =
-      graph()->NewNode(machine()->Word32Equal(), value_instance_type,
-                       jsgraph()->Int32Constant(HEAP_NUMBER_TYPE));
-  Node* branch_heapnumber =
-      graph()->NewNode(common()->Branch(), check_heapnumber, control);
+  __ GotoIf(__ WordEqual(value_map, __ HeapNumberMapConstant()),
+            &if_heapnumber);
 
-  // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or NaN.
-  Node* if_heapnumber = graph()->NewNode(common()->IfTrue(), branch_heapnumber);
-  Node* eheapnumber = effect;
+  // All other values that reach here are true.
+  __ Goto(&done, __ Int32Constant(1));
+
+  __ Bind(&if_heapnumber);
   {
-    // Load the raw value of {value}.
-    Node* value_value = eheapnumber = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        eheapnumber, if_heapnumber);
-
-    // Check if {value} is not one of 0, -0, or NaN.
-    controls[count] = if_heapnumber;
-    effects[count] = eheapnumber;
-    values[count] = graph()->NewNode(
-        machine()->Float64LessThan(), fzero,
-        graph()->NewNode(machine()->Float64Abs(), value_value));
-    count++;
-  }
-  control = graph()->NewNode(common()->IfFalse(), branch_heapnumber);
-
-  // The {value} is either a JSReceiver, a Symbol or some Simd128Value. In
-  // those cases we can just check the undetectable bit on the map, which
-  // will only be set for certain JSReceivers, i.e. document.all.
-  {
-    // Load the {value} map bit field.
-    Node* value_map_bitfield = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        effect, control);
-
-    controls[count] = control;
-    effects[count] = effect;
-    values[count] = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), value_map_bitfield,
-                         jsgraph()->Int32Constant(1 << Map::kIsUndetectable)),
-        zero);
-    count++;
+    // For HeapNumber {value}, just check that its value is not 0.0, -0.0 or
+    // NaN.
+    Node* value_value =
+        __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+    __ Goto(&done, __ Float64LessThan(fzero, __ Float64Abs(value_value)));
   }
 
-  // Merge the different controls.
-  control = graph()->NewNode(common()->Merge(count), count, controls);
-  effects[count] = control;
-  effect = graph()->NewNode(common()->EffectPhi(count), count + 1, effects);
-  values[count] = control;
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, count),
-                           count + 1, values);
+  __ Bind(&if_smi);
+  {
+    // If {value} is a Smi, then we only need to check that it's not zero.
+    __ Goto(&done,
+            __ Word32Equal(__ WordEqual(value, __ IntPtrConstant(0)), zero));
+  }
 
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
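// The rewritten LowerTruncateTaggedToBit encodes JavaScript ToBoolean. A
// simplified reference model with a hypothetical tagged-value struct (this
// is not V8's object model), performing the checks in the same order as the
// lowering above: false, Smi, empty string, undetectable map bit, HeapNumber.
#include <cassert>
#include <cmath>
#include <string>

struct Value {
  enum Kind { kFalseOddball, kSmi, kString, kHeapNumber, kOther };
  Kind kind;
  int smi = 0;
  std::string str;
  double number = 0.0;
  bool undetectable = false;  // Map::kIsUndetectable bit, e.g. document.all.
};

bool TruncateToBit(const Value& v) {
  if (v.kind == Value::kFalseOddball) return false;             // false itself
  if (v.kind == Value::kSmi) return v.smi != 0;                 // non-zero Smi
  if (v.kind == Value::kString && v.str.empty()) return false;  // empty string
  if (v.undetectable) return false;                             // undetectable
  if (v.kind == Value::kHeapNumber)
    return std::abs(v.number) > 0.0;  // rules out 0.0, -0.0 and NaN at once
  return true;                        // everything else is trueish
}

int main() {
  assert(!TruncateToBit({Value::kHeapNumber, 0, "", std::nan("")}));
  assert(!TruncateToBit({Value::kHeapNumber, 0, "", -0.0}));
  assert(TruncateToBit({Value::kSmi, 42}));
  assert(!TruncateToBit({Value::kString, 0, ""}));
}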
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerChangeTaggedToInt32(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToInt32(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToUint32(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node) {
+  return LowerTruncateTaggedToFloat64(node);
+}
+
+Node* EffectControlLinearizer::LowerChangeTaggedToTaggedSigned(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedSigned);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, value);
+
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ ChangeFloat64ToInt32(vfalse);
+  vfalse = ChangeInt32ToSmi(vfalse);
+  __ Goto(&done, vfalse);
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
   Node* vtrue = ChangeSmiToInt32(value);
+  vtrue = __ ChangeInt32ToFloat64(vtrue);
+  __ Goto(&done, vtrue);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->ChangeFloat64ToInt32(), vfalse);
-  }
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  __ Goto(&done, vfalse);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToUint32(Node* node, Node* effect,
-                                                   Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->ChangeFloat64ToUint32(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerChangeTaggedToFloat64(Node* node, Node* effect,
-                                                    Node* control) {
-  return LowerTruncateTaggedToFloat64(node, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node, Node* effect,
-                                                      Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue;
-  {
-    vtrue = ChangeSmiToInt32(value);
-    vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-  }
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckBounds(Node* node, Node* frame_state) {
   Node* index = node->InputAt(0);
   Node* limit = node->InputAt(1);
 
-  Node* check = graph()->NewNode(machine()->Uint32LessThan(), index, limit);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check,
-      frame_state, effect, control);
-
-  return ValueEffectControl(index, effect, control);
+  Node* check = __ Uint32LessThan(index, limit);
+  __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check, frame_state);
+  return index;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state,
-                                        Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
+  CheckMapsParameters const& p = CheckMapsParametersOf(node->op());
   Node* value = node->InputAt(0);
 
-  // Load the current map of the {value}.
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
+  ZoneHandleSet<Map> const& maps = p.maps();
+  size_t const map_count = maps.size();
 
-  int const map_count = node->op()->ValueInputCount() - 1;
-  Node** controls = temp_zone()->NewArray<Node*>(map_count);
-  Node** effects = temp_zone()->NewArray<Node*>(map_count + 1);
+  if (p.flags() & CheckMapsFlag::kTryMigrateInstance) {
+    auto done =
+        __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count * 2);
+    auto migrate = __ MakeDeferredLabel<1>();
 
-  for (int i = 0; i < map_count; ++i) {
-    Node* map = node->InputAt(1 + i);
+    // Load the current map of the {value}.
+    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
 
-    Node* check = graph()->NewNode(machine()->WordEqual(), value_map, map);
-    if (i == map_count - 1) {
-      controls[i] = effects[i] = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kWrongMap), check,
-          frame_state, effect, control);
-    } else {
-      control = graph()->NewNode(common()->Branch(), check, control);
-      controls[i] = graph()->NewNode(common()->IfTrue(), control);
-      control = graph()->NewNode(common()->IfFalse(), control);
-      effects[i] = effect;
+    // Perform the map checks.
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ GotoUnless(check, &migrate);
+        __ Goto(&done);
+      } else {
+        __ GotoIf(check, &done);
+      }
     }
+
+    // Perform the (deferred) instance migration.
+    __ Bind(&migrate);
+    {
+      // If the map is not deprecated, the migration attempt makes no sense.
+      Node* bitfield3 =
+          __ LoadField(AccessBuilder::ForMapBitField3(), value_map);
+      Node* if_not_deprecated = __ WordEqual(
+          __ Word32And(bitfield3, __ Int32Constant(Map::Deprecated::kMask)),
+          __ Int32Constant(0));
+      __ DeoptimizeIf(DeoptimizeReason::kWrongMap, if_not_deprecated,
+                      frame_state);
+
+      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+      Runtime::FunctionId id = Runtime::kTryMigrateInstance;
+      CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+          graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
+      Node* result =
+          __ Call(desc, __ CEntryStubConstant(1), value,
+                  __ ExternalConstant(ExternalReference(id, isolate())),
+                  __ Int32Constant(1), __ NoContextConstant());
+      Node* check = ObjectIsSmi(result);
+      __ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, check,
+                      frame_state);
+    }
+
+    // Reload the current map of the {value}.
+    value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+    // Perform the map checks again.
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+      } else {
+        __ GotoIf(check, &done);
+      }
+    }
+
+    __ Goto(&done);
+    __ Bind(&done);
+  } else {
+    auto done =
+        __ MakeLabelFor(GraphAssemblerLabelType::kNonDeferred, map_count);
+
+    // Load the current map of the {value}.
+    Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+
+    for (size_t i = 0; i < map_count; ++i) {
+      Node* map = __ HeapConstant(maps[i]);
+      Node* check = __ WordEqual(value_map, map);
+      if (i == map_count - 1) {
+        __ DeoptimizeUnless(DeoptimizeReason::kWrongMap, check, frame_state);
+      } else {
+        __ GotoIf(check, &done);
+      }
+    }
+    __ Goto(&done);
+    __ Bind(&done);
   }
-
-  control = graph()->NewNode(common()->Merge(map_count), map_count, controls);
-  effects[map_count] = control;
-  effect =
-      graph()->NewNode(common()->EffectPhi(map_count), map_count + 1, effects);
-
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
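// A schematic of the control flow built for CheckMapsFlag::kTryMigrateInstance
// above, as plain C++ with hypothetical types (Map as int, exceptions in
// place of deoptimization): check against every expected map, and on a miss
// attempt one instance migration, but only if the current map is deprecated,
// before the final deoptimizing re-check.
#include <algorithm>
#include <stdexcept>
#include <vector>

using Map = int;

struct Object {
  Map map;
  bool deprecated;          // The Map::Deprecated bit read from bit_field3.
  bool MigrateInstance() {  // Stand-in for Runtime::kTryMigrateInstance.
    map += 1;               // Pretend migration advances to the next map.
    return true;            // The runtime call returns a Smi on failure.
  }
};

void CheckMaps(Object* o, const std::vector<Map>& maps) {
  auto matches = [&] {
    return std::find(maps.begin(), maps.end(), o->map) != maps.end();
  };
  if (matches()) return;  // First round of map checks.
  if (!o->deprecated) throw std::runtime_error("deopt: WrongMap");
  if (!o->MigrateInstance())
    throw std::runtime_error("deopt: InstanceMigrationFailed");
  if (!matches()) throw std::runtime_error("deopt: WrongMap");  // Re-check.
}

int main() {
  Object o{1, true};
  CheckMaps(&o, {2, 3});  // Migrates 1 -> 2; the re-check then succeeds.
}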
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckNumber(Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>();
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  __ Goto(&done);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
+  __ Bind(&if_not_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check1 = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check1, frame_state);
+  __ Goto(&done);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  {
-    Node* value_map = efalse0 =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse0, if_false0);
-    Node* check1 = graph()->NewNode(machine()->WordEqual(), value_map,
-                                    jsgraph()->HeapNumberMapConstant());
-    if_false0 = efalse0 = graph()->NewNode(
-        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check1,
-        frame_state, efalse0, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state,
-                                          Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckReceiver(Node* node,
+                                                  Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  Node* check0 = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check0,
-                       frame_state, effect, control);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
 
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-  Node* value_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-      effect, control);
-
-  Node* check1 =
-      graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
-                       jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType), check1,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* check = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAJavaScriptObject, check,
+                      frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state,
-                                      Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckString(Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNoReason),
-                       value, frame_state, effect, control);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
 
-  return ValueEffectControl(value, effect, control);
+  Node* check = __ Uint32LessThan(value_instance_type,
+                                  __ Uint32Constant(FIRST_NONSTRING_TYPE));
+  __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Add(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckInternalizedString(Node* node,
+                                                            Node* frame_state) {
+  Node* value = node->InputAt(0);
+
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+
+  Node* check = __ Word32Equal(
+      __ Word32And(value_instance_type,
+                   __ Int32Constant(kIsNotStringMask | kIsNotInternalizedMask)),
+      __ Int32Constant(kInternalizedTag));
+  __ DeoptimizeUnless(DeoptimizeReason::kWrongInstanceType, check, frame_state);
+
+  return value;
+}
+
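+// A standalone illustration of the predicate LowerCheckInternalizedString
+// builds. The constants mirror V8's instance-type encoding at the time
+// (string types have the kIsNotStringMask bit clear, internalized strings
+// additionally have the kIsNotInternalizedMask bit clear); treat the exact
+// values as an assumption of this sketch. One masked compare answers both
+// "is it a string?" and "is it internalized?".
+//
+// #include <cassert>
+// #include <cstdint>
+//
+// constexpr uint32_t kIsNotStringMask = 0x80;
+// constexpr uint32_t kIsNotInternalizedMask = 0x40;
+// constexpr uint32_t kInternalizedTag = 0x00;
+//
+// bool IsInternalizedString(uint32_t instance_type) {
+//   return (instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
+//          kInternalizedTag;
+// }
+//
+// int main() {
+//   assert(IsInternalizedString(0x02));   // internalized string type
+//   assert(!IsInternalizedString(0x42));  // ordinary, non-internalized string
+//   assert(!IsInternalizedString(0x82));  // not a string at all
+// }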
+Node* EffectControlLinearizer::LowerCheckIf(Node* node, Node* frame_state) {
+  Node* value = node->InputAt(0);
+  __ DeoptimizeUnless(DeoptimizeKind::kEager, DeoptimizeReason::kNoReason,
+                      value, frame_state);
+  return value;
+}
+
+Node* EffectControlLinearizer::LowerCheckedInt32Add(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* value =
-      graph()->NewNode(machine()->Int32AddWithOverflow(), lhs, rhs, control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), value, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* value = __ Int32AddWithOverflow(lhs, rhs);
+  Node* check = __ Projection(1, value);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Sub(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Sub(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* value =
-      graph()->NewNode(machine()->Int32SubWithOverflow(), lhs, rhs, control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), value, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), value, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* value = __ Int32SubWithOverflow(lhs, rhs);
+  Node* check = __ Projection(1, value);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Div(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* minusone = jsgraph()->Int32Constant(-1);
-  Node* minint = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::min());
-
+Node* EffectControlLinearizer::LowerCheckedInt32Div(Node* node,
+                                                    Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_is_minint = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+  auto minint_check_done = __ MakeLabel<2>();
+
+  Node* zero = __ Int32Constant(0);
+
   // Check if {rhs} is positive (and not zero).
-  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  Node* check0 = __ Int32LessThan(zero, rhs);
+  __ GotoUnless(check0, &if_not_positive);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    // Fast case, no additional checking required.
-    vtrue0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_true0);
-  }
+  // Fast case, no additional checking required.
+  __ Goto(&done, __ Int32Div(lhs, rhs));
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
   {
+    __ Bind(&if_not_positive);
+
     // Check if {rhs} is zero.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-    if_false0 = efalse0 = graph()->NewNode(
-        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-        frame_state, efalse0, if_false0);
+    Node* check = __ Word32Equal(rhs, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
     // Check if {lhs} is zero, as that would produce minus zero.
-    check = graph()->NewNode(machine()->Word32Equal(), lhs, zero);
-    if_false0 = efalse0 =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check, frame_state, efalse0, if_false0);
+    check = __ Word32Equal(lhs, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
 
     // Check if {lhs} is kMinInt and {rhs} is -1, in which case we'd have
     // to return -kMinInt, which is not representable.
+    Node* minint = __ Int32Constant(std::numeric_limits<int32_t>::min());
     Node* check1 = graph()->NewNode(machine()->Word32Equal(), lhs, minint);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    __ GotoIf(check1, &if_is_minint);
+    __ Goto(&minint_check_done);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    {
-      // Check if {rhs} is -1.
-      Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, minusone);
-      if_true1 = etrue1 =
-          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                           check, frame_state, etrue1, if_true1);
-    }
+    __ Bind(&if_is_minint);
+    // Check if {rhs} is -1.
+    Node* minusone = __ Int32Constant(-1);
+    Node* is_minus_one = __ Word32Equal(rhs, minusone);
+    __ DeoptimizeIf(DeoptimizeReason::kOverflow, is_minus_one, frame_state);
+    __ Goto(&minint_check_done);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-
+    __ Bind(&minint_check_done);
     // Perform the actual integer division.
-    vfalse0 = graph()->NewNode(machine()->Int32Div(), lhs, rhs, if_false0);
+    __ Goto(&done, __ Int32Div(lhs, rhs));
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
-                       vfalse0, control);
+  __ Bind(&done);
+  Node* value = done.PhiAt(0);
 
   // Check if the remainder is non-zero.
-  Node* check =
-      graph()->NewNode(machine()->Word32Equal(), lhs,
-                       graph()->NewNode(machine()->Int32Mul(), rhs, value));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
+  Node* check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
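// A reference model of the deopt conditions LowerCheckedInt32Div wires up,
// in plain C++ (exceptions stand in for deoptimization, and the first three
// checks are only reached when {rhs} is not strictly positive, as above):
// division by zero, 0 / negative (minus zero as a JS number), kMinInt / -1
// (the one overflowing case), plus the unconditional lost-precision check.
#include <cstdint>
#include <limits>
#include <stdexcept>

int32_t CheckedInt32Div(int32_t lhs, int32_t rhs) {
  if (rhs <= 0) {
    if (rhs == 0) throw std::runtime_error("deopt: DivisionByZero");
    if (lhs == 0) throw std::runtime_error("deopt: MinusZero");
    if (lhs == std::numeric_limits<int32_t>::min() && rhs == -1)
      throw std::runtime_error("deopt: Overflow");  // -kMinInt overflows int32
  }
  int32_t value = lhs / rhs;  // Safe: the overflowing case deopted above.
  if (lhs != rhs * value)     // Non-zero remainder: result is not an int32.
    throw std::runtime_error("deopt: LostPrecision");
  return value;
}

int main() {
  return CheckedInt32Div(-8, -2) == 4 ? 0 : 1;  // Exact division, no deopt.
}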
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mod(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-  Node* one = jsgraph()->Int32Constant(1);
-
+Node* EffectControlLinearizer::LowerCheckedInt32Mod(Node* node,
+                                                    Node* frame_state) {
   // General case for signed integer modulus, with optimization for (unknown)
   // power of 2 right hand side.
   //
@@ -1439,1226 +1339,704 @@
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  // Check if {rhs} is not strictly positive.
-  Node* check0 = graph()->NewNode(machine()->Int32LessThanOrEqual(), rhs, zero);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check0, control);
+  auto if_rhs_not_positive = __ MakeDeferredLabel<1>();
+  auto if_lhs_negative = __ MakeDeferredLabel<1>();
+  auto if_power_of_two = __ MakeLabel<1>();
+  auto rhs_checked = __ MakeLabel<2>(MachineRepresentation::kWord32);
+  auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
+  Node* zero = __ Int32Constant(0);
+
+  // Check if {rhs} is not strictly positive.
+  Node* check0 = __ Int32LessThanOrEqual(rhs, zero);
+  __ GotoIf(check0, &if_rhs_not_positive);
+  __ Goto(&rhs_checked, rhs);
+
+  __ Bind(&if_rhs_not_positive);
   {
     // Negating {rhs} might still produce a negative result in the case of
     // -2^31, but that is handled safely below.
-    vtrue0 = graph()->NewNode(machine()->Int32Sub(), zero, rhs);
+    Node* vtrue0 = __ Int32Sub(zero, rhs);
 
     // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue0, zero);
-    if_true0 = etrue0 = graph()->NewNode(
-        common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-        frame_state, etrue0, if_true0);
+    Node* check = __ Word32Equal(vtrue0, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
+    __ Goto(&rhs_checked, vtrue0);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0 = rhs;
-
-  // At this point {rhs} is either greater than zero or -2^31, both are
-  // fine for the code that follows.
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  rhs = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                         vtrue0, vfalse0, control);
+  __ Bind(&rhs_checked);
+  rhs = rhs_checked.PhiAt(0);
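+  // At this point {rhs} is either strictly positive or -2^31; both are
+  // fine for the code that follows.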
 
   // Check if {lhs} is negative.
-  Node* check1 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
-  Node* branch1 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, control);
+  Node* check1 = __ Int32LessThan(lhs, zero);
+  __ GotoIf(check1, &if_lhs_negative);
 
-  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-  Node* etrue1 = effect;
-  Node* vtrue1;
+  // {lhs} is non-negative.
   {
-    // Compute the remainder using {lhs % msk}.
-    vtrue1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
-
-    // Check if we would have to return -0.
-    Node* check = graph()->NewNode(machine()->Word32Equal(), vtrue1, zero);
-    if_true1 = etrue1 =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check, frame_state, etrue1, if_true1);
-  }
-
-  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-  Node* efalse1 = effect;
-  Node* vfalse1;
-  {
-    Node* msk = graph()->NewNode(machine()->Int32Sub(), rhs, one);
+    Node* one = __ Int32Constant(1);
+    Node* msk = __ Int32Sub(rhs, one);
 
     // Check if {rhs} minus one is a valid mask.
-    Node* check2 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), rhs, msk), zero);
-    Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
-    // Compute the remainder using {lhs & msk}.
-    Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-    Node* vtrue2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
-
+    Node* check2 = __ Word32Equal(__ Word32And(rhs, msk), zero);
+    __ GotoIf(check2, &if_power_of_two);
     // Compute the remainder using the generic {lhs % rhs}.
-    Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-    Node* vfalse2 =
-        graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_false2);
+    __ Goto(&done, __ Int32Mod(lhs, rhs));
 
-    if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-    vfalse1 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue2, vfalse2, if_false1);
+    __ Bind(&if_power_of_two);
+    // Compute the remainder using {lhs & msk}.
+    __ Goto(&done, __ Word32And(lhs, msk));
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue1,
-                       vfalse1, control);
+  __ Bind(&if_lhs_negative);
+  {
+    // Compute the remainder using {lhs % rhs}.
+    Node* vtrue1 = __ Int32Mod(lhs, rhs);
 
-  return ValueEffectControl(value, effect, control);
+    // Check if we would have to return -0.
+    Node* check = __ Word32Equal(vtrue1, zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check, frame_state);
+    __ Goto(&done, vtrue1);
+  }
+
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Div(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Div(Node* node,
+                                                     Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  Node* zero = __ Int32Constant(0);
+
   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-      frame_state, effect, control);
+  Node* check = __ Word32Equal(rhs, zero);
+  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
   // Perform the actual unsigned integer division.
-  Node* value = graph()->NewNode(machine()->Uint32Div(), lhs, rhs, control);
+  Node* value = __ Uint32Div(lhs, rhs);
 
   // Check if the remainder is non-zero.
-  check = graph()->NewNode(machine()->Word32Equal(), lhs,
-                           graph()->NewNode(machine()->Int32Mul(), rhs, value));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  check = __ Word32Equal(lhs, __ Int32Mul(rhs, value));
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32Mod(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
-  Node* zero = jsgraph()->Int32Constant(0);
-
+Node* EffectControlLinearizer::LowerCheckedUint32Mod(Node* node,
+                                                     Node* frame_state) {
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
+  Node* zero = __ Int32Constant(0);
+
   // Ensure that {rhs} is not zero, otherwise we'd have to return NaN.
-  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeIf(DeoptimizeReason::kDivisionByZero), check,
-      frame_state, effect, control);
+  Node* check = __ Word32Equal(rhs, zero);
+  __ DeoptimizeIf(DeoptimizeReason::kDivisionByZero, check, frame_state);
 
   // Perform the actual unsigned integer modulus.
-  Node* value = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, control);
-
-  return ValueEffectControl(value, effect, control);
+  return __ Uint32Mod(lhs, rhs);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32Mul(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32Mul(Node* node,
+                                                    Node* frame_state) {
   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
-  Node* zero = jsgraph()->Int32Constant(0);
   Node* lhs = node->InputAt(0);
   Node* rhs = node->InputAt(1);
 
-  Node* projection =
-      graph()->NewNode(machine()->Int32MulWithOverflow(), lhs, rhs, control);
+  Node* projection = __ Int32MulWithOverflow(lhs, rhs);
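+  // Projection 1 of Int32MulWithOverflow is the overflow bit.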
+  Node* check = __ Projection(1, projection);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
 
-  Node* check = graph()->NewNode(common()->Projection(1), projection, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  Node* value = graph()->NewNode(common()->Projection(0), projection, control);
+  Node* value = __ Projection(0, projection);
 
   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value, zero);
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, control);
+    auto if_zero = __ MakeDeferredLabel<1>();
+    auto check_done = __ MakeLabel<2>();
+    Node* zero = __ Int32Constant(0);
+    Node* check_zero = __ Word32Equal(value, zero);
+    __ GotoIf(check_zero, &if_zero);
+    __ Goto(&check_done);
 
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* e_if_zero = effect;
-    {
-      // We may need to return negative zero.
-      Node* or_inputs = graph()->NewNode(machine()->Word32Or(), lhs, rhs);
-      Node* check_or =
-          graph()->NewNode(machine()->Int32LessThan(), or_inputs, zero);
-      if_zero = e_if_zero =
-          graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                           check_or, frame_state, e_if_zero, if_zero);
-    }
+    __ Bind(&if_zero);
+    // We may need to return negative zero.
+    Node* check_or = __ Int32LessThan(__ Word32Or(lhs, rhs), zero);
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_or, frame_state);
+    __ Goto(&check_done);
 
-    Node* if_not_zero = graph()->NewNode(common()->IfFalse(), branch_zero);
-    Node* e_if_not_zero = effect;
-
-    control = graph()->NewNode(common()->Merge(2), if_zero, if_not_zero);
-    effect = graph()->NewNode(common()->EffectPhi(2), e_if_zero, e_if_not_zero,
-                              control);
+    __ Bind(&check_done);
   }
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(Node* node,
-                                                         Node* frame_state,
-                                                         Node* effect,
-                                                         Node* control) {
+Node* EffectControlLinearizer::LowerCheckedInt32ToTaggedSigned(
+    Node* node, Node* frame_state) {
   DCHECK(SmiValuesAre31Bits());
   Node* value = node->InputAt(0);
 
-  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value,
-                               control);
-
-  Node* check = graph()->NewNode(common()->Projection(1), add, control);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kOverflow),
-                       check, frame_state, effect, control);
-
-  value = graph()->NewNode(common()->Projection(0), add, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* add = __ Int32AddWithOverflow(value, value);
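+  // Adding {value} to itself performs the Smi tag shift; the overflow bit
+  // signals that {value} is outside the 31-bit Smi range.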
+  Node* check = __ Projection(1, add);
+  __ DeoptimizeIf(DeoptimizeReason::kOverflow, check, frame_state);
+  return __ Projection(0, add);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
-                                                   Node* frame_state,
-                                                   Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToInt32(Node* node,
+                                                         Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* max_int = jsgraph()->Int32Constant(std::numeric_limits<int32_t>::max());
-  Node* is_safe =
-      graph()->NewNode(machine()->Uint32LessThanOrEqual(), value, max_int);
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), is_safe,
-      frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* max_int = __ Int32Constant(std::numeric_limits<int32_t>::max());
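+  // The unsigned {value} fits into int32 iff it does not exceed INT32_MAX.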
+  Node* is_safe = __ Uint32LessThanOrEqual(value, max_int);
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, is_safe, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(Node* node,
-                                                          Node* frame_state,
-                                                          Node* effect,
-                                                          Node* control) {
+Node* EffectControlLinearizer::LowerCheckedUint32ToTaggedSigned(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->Uint32LessThanOrEqual(), value,
-                                 SmiMaxValueConstant());
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecision), check,
-      frame_state, effect, control);
-  value = ChangeUint32ToSmi(value);
-
-  return ValueEffectControl(value, effect, control);
+  Node* check = __ Uint32LessThanOrEqual(value, SmiMaxValueConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecision, check, frame_state);
+  return ChangeUint32ToSmi(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
-                                                    Node* value,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
-  Node* value32 = graph()->NewNode(machine()->RoundFloat64ToInt32(), value);
-  Node* check_same = graph()->NewNode(
-      machine()->Float64Equal(), value,
-      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value32));
-  control = effect = graph()->NewNode(
-      common()->DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN),
-      check_same, frame_state, effect, control);
+Node* EffectControlLinearizer::BuildCheckedFloat64ToInt32(
+    CheckForMinusZeroMode mode, Node* value, Node* frame_state) {
+  Node* value32 = __ RoundFloat64ToInt32(value);
+  Node* check_same = __ Float64Equal(value, __ ChangeInt32ToFloat64(value32));
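+  // If rounding to int32 and back does not reproduce {value}, we either
+  // lost precision or {value} is NaN.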
+  __ DeoptimizeUnless(DeoptimizeReason::kLostPrecisionOrNaN, check_same,
+                      frame_state);
 
   if (mode == CheckForMinusZeroMode::kCheckForMinusZero) {
     // Check if {value} is -0.
-    Node* check_zero = graph()->NewNode(machine()->Word32Equal(), value32,
-                                        jsgraph()->Int32Constant(0));
-    Node* branch_zero = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check_zero, control);
+    auto if_zero = __ MakeDeferredLabel<1>();
+    auto check_done = __ MakeLabel<2>();
 
-    Node* if_zero = graph()->NewNode(common()->IfTrue(), branch_zero);
-    Node* if_notzero = graph()->NewNode(common()->IfFalse(), branch_zero);
+    Node* check_zero = __ Word32Equal(value32, __ Int32Constant(0));
+    __ GotoIf(check_zero, &if_zero);
+    __ Goto(&check_done);
 
+    __ Bind(&if_zero);
     // In case of 0, we need to check the high bits for the IEEE -0 pattern.
-    Node* check_negative = graph()->NewNode(
-        machine()->Int32LessThan(),
-        graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-        jsgraph()->Int32Constant(0));
+    Node* check_negative = __ Int32LessThan(__ Float64ExtractHighWord32(value),
+                                            __ Int32Constant(0));
+    __ DeoptimizeIf(DeoptimizeReason::kMinusZero, check_negative, frame_state);
+    __ Goto(&check_done);
 
-    Node* deopt_minus_zero =
-        graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kMinusZero),
-                         check_negative, frame_state, effect, if_zero);
-
-    control =
-        graph()->NewNode(common()->Merge(2), deopt_minus_zero, if_notzero);
-    effect = graph()->NewNode(common()->EffectPhi(2), deopt_minus_zero, effect,
-                              control);
+    __ Bind(&check_done);
   }
-
-  return ValueEffectControl(value32, effect, control);
+  return value32;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerCheckedFloat64ToInt32(Node* node,
+                                                          Node* frame_state) {
+  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
+  Node* value = node->InputAt(0);
+  return BuildCheckedFloat64ToInt32(mode, value, frame_state);
+}
+
+Node* EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(
+    Node* node, Node* frame_state) {
+  Node* value = node->InputAt(0);
+  Node* check = ObjectIsSmi(value);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
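+  // {value} is now known to be a Smi; untag it.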
+  return ChangeSmiToInt32(value);
+}
+
+Node* EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
+                                                         Node* frame_state) {
   CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
   Node* value = node->InputAt(0);
 
-  return BuildCheckedFloat64ToInt32(mode, value, frame_state, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedSignedToInt32(Node* node,
-                                                         Node* frame_state,
-                                                         Node* effect,
-                                                         Node* control) {
-  Node* value = node->InputAt(0);
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
-                       check, frame_state, effect, control);
-  value = ChangeSmiToInt32(value);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToInt32(Node* node,
-                                                   Node* frame_state,
-                                                   Node* effect,
-                                                   Node* control) {
-  CheckForMinusZeroMode mode = CheckMinusZeroModeOf(node->op());
-  Node* value = node->InputAt(0);
-
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+  __ GotoUnless(check, &if_not_smi);
   // In the Smi case, just convert to int32.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
   // In the non-Smi case, check the heap numberness, load the number and convert
   // to int32.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* check = graph()->NewNode(machine()->WordEqual(), value_map,
-                                   jsgraph()->HeapNumberMapConstant());
-    if_false = efalse = graph()->NewNode(
-        common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber), check,
-        frame_state, efalse, if_false);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    ValueEffectControl state =
-        BuildCheckedFloat64ToInt32(mode, vfalse, frame_state, efalse, if_false);
-    if_false = state.control;
-    efalse = state.effect;
-    vfalse = state.value;
-  }
+  __ Bind(&if_not_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check_map = __ WordEqual(value_map, __ HeapNumberMapConstant());
+  __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_map,
+                      frame_state);
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = BuildCheckedFloat64ToInt32(mode, vfalse, frame_state);
+  __ Goto(&done, vfalse);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
-    CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
-    Node* control) {
-  Node* value_map = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMap()), value, effect, control);
-
-  Node* check_number = graph()->NewNode(machine()->WordEqual(), value_map,
-                                        jsgraph()->HeapNumberMapConstant());
-
+Node* EffectControlLinearizer::BuildCheckedHeapNumberOrOddballToFloat64(
+    CheckTaggedInputMode mode, Node* value, Node* frame_state) {
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* check_number = __ WordEqual(value_map, __ HeapNumberMapConstant());
   switch (mode) {
     case CheckTaggedInputMode::kNumber: {
-      control = effect = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber),
-          check_number, frame_state, effect, control);
+      __ DeoptimizeUnless(DeoptimizeReason::kNotAHeapNumber, check_number,
+                          frame_state);
       break;
     }
     case CheckTaggedInputMode::kNumberOrOddball: {
-      Node* branch =
-          graph()->NewNode(common()->Branch(), check_number, control);
+      auto check_done = __ MakeLabel<2>();
 
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* etrue = effect;
-
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      __ GotoIf(check_number, &check_done);
       // Oddballs also contain the numeric value, so let us just check that
       // we have an oddball here.
-      Node* efalse = effect;
-      Node* instance_type = efalse = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-          value_map, efalse, if_false);
+      Node* instance_type =
+          __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
       Node* check_oddball =
-          graph()->NewNode(machine()->Word32Equal(), instance_type,
-                           jsgraph()->Int32Constant(ODDBALL_TYPE));
-      if_false = efalse = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball),
-          check_oddball, frame_state, efalse, if_false);
+          __ Word32Equal(instance_type, __ Int32Constant(ODDBALL_TYPE));
+      __ DeoptimizeUnless(DeoptimizeReason::kNotANumberOrOddball, check_oddball,
+                          frame_state);
       STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
+      __ Goto(&check_done);
 
-      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
+      __ Bind(&check_done);
       break;
     }
   }
-
-  value = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-      effect, control);
-  return ValueEffectControl(value, effect, control);
+  return __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToFloat64(Node* node,
+                                                           Node* frame_state) {
   CheckTaggedInputMode mode = CheckTaggedInputModeOf(node->op());
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
+  __ GotoIf(check, &if_smi);
 
   // In the Smi case, just convert to int32 and then float64.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
-  vtrue = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue);
-
   // Otherwise, check heap numberness and load the number.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl number_state = BuildCheckedHeapNumberOrOddballToFloat64(
-      mode, value, frame_state, effect, if_false);
+  Node* number =
+      BuildCheckedHeapNumberOrOddballToFloat64(mode, value, frame_state);
+  __ Goto(&done, number);
 
-  Node* merge =
-      graph()->NewNode(common()->Merge(2), if_true, number_state.control);
-  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
-                                      number_state.effect, merge);
-  Node* result =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2), vtrue,
-                       number_state.value, merge);
+  __ Bind(&if_smi);
+  Node* from_smi = ChangeSmiToInt32(value);
+  from_smi = __ ChangeInt32ToFloat64(from_smi);
+  __ Goto(&done, from_smi);
 
-  return ValueEffectControl(result, effect_phi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(Node* node,
-                                                          Node* frame_state,
-                                                          Node* effect,
-                                                          Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedSigned(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeUnless(DeoptimizeReason::kNotASmi),
-                       check, frame_state, effect, control);
+  __ DeoptimizeUnless(DeoptimizeReason::kNotASmi, check, frame_state);
 
-  return ValueEffectControl(value, effect, control);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(Node* node,
-                                                           Node* frame_state,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTaggedToTaggedPointer(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
   Node* check = ObjectIsSmi(value);
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kSmi), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ DeoptimizeIf(DeoptimizeReason::kSmi, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerTruncateTaggedToWord32(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  __ GotoUnless(check, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Bind(&if_not_smi);
+  STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
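+  // A heap number and an oddball store their float64 payload at the same
+  // offset, so a single load handles both.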
+  Node* vfalse = __ LoadField(AccessBuilder::ForHeapNumberValue(), value);
+  vfalse = __ TruncateFloat64ToWord32(vfalse);
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(HeapNumber::kValueOffset == Oddball::kToNumberRawOffset);
-    vfalse = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), value,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(Node* node,
-                                                            Node* frame_state,
-                                                            Node* effect,
-                                                            Node* control) {
+Node* EffectControlLinearizer::LowerCheckedTruncateTaggedToWord32(
+    Node* node, Node* frame_state) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+  auto if_not_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kWord32);
 
+  Node* check = ObjectIsSmi(value);
+  __ GotoUnless(check, &if_not_smi);
   // In the Smi case, just convert to int32.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = ChangeSmiToInt32(value);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
   // Otherwise, check that it's a heap number or oddball and truncate the value
   // to int32.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  ValueEffectControl false_state = BuildCheckedHeapNumberOrOddballToFloat64(
-      CheckTaggedInputMode::kNumberOrOddball, value, frame_state, effect,
-      if_false);
-  false_state.value =
-      graph()->NewNode(machine()->TruncateFloat64ToWord32(), false_state.value);
+  __ Bind(&if_not_smi);
+  Node* number = BuildCheckedHeapNumberOrOddballToFloat64(
+      CheckTaggedInputMode::kNumberOrOddball, value, frame_state);
+  number = __ TruncateFloat64ToWord32(number);
+  __ Goto(&done, number);
 
-  Node* merge =
-      graph()->NewNode(common()->Merge(2), if_true, false_state.control);
-  Node* effect_phi = graph()->NewNode(common()->EffectPhi(2), etrue,
-                                      false_state.effect, merge);
-  Node* result =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue,
-                       false_state.value, merge);
-
-  return ValueEffectControl(result, effect_phi, merge);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsCallable(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsDetectableCallable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  __ GotoIf(check, &if_smi);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
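+  // {value} is detectable-callable iff the kIsCallable bit is set and the
+  // kIsUndetectable bit is clear.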
+  Node* vfalse = __ Word32Equal(
+      __ Int32Constant(1 << Map::kIsCallable),
+      __ Word32And(value_bit_field,
+                   __ Int32Constant((1 << Map::kIsCallable) |
+                                    (1 << Map::kIsUndetectable))));
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_bit_field = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(
-        machine()->Word32Equal(),
-        jsgraph()->Int32Constant(1 << Map::kIsCallable),
-        graph()->NewNode(
-            machine()->Word32And(), value_bit_field,
-            jsgraph()->Int32Constant((1 << Map::kIsCallable) |
-                                     (1 << Map::kIsUndetectable))));
-  }
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsNumber(Node* node, Node* effect,
-                                             Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsNonCallable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_primitive = __ MakeDeferredLabel<2>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  Node* check0 = ObjectIsSmi(value);
+  __ GotoIf(check0, &if_primitive);
+
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* check1 = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ GotoUnless(check1, &if_primitive);
+
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
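+  // A JSReceiver is non-callable iff the kIsCallable bit is clear.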
+  Node* check2 = __ Word32Equal(
+      __ Int32Constant(0),
+      __ Word32And(value_bit_field, __ Int32Constant(1 << Map::kIsCallable)));
+  __ Goto(&done, check2);
+
+  __ Bind(&if_primitive);
+  __ Goto(&done, __ Int32Constant(0));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsNumber(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  __ GotoIf(ObjectIsSmi(value), &if_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  __ Goto(&done, __ WordEqual(value_map, __ HeapNumberMapConstant()));
+
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(1));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsReceiver(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
+  __ GotoIf(ObjectIsSmi(value), &if_smi);
+
+  STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
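+  // Receivers are exactly the instance types from FIRST_JS_RECEIVER_TYPE on.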
+  Node* result = __ Uint32LessThanOrEqual(
+      __ Uint32Constant(FIRST_JS_RECEIVER_TYPE), value_instance_type);
+  __ Goto(&done, result);
+
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
+
+  __ Bind(&done);
+  return done.PhiAt(0);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsSmi(Node* node) {
+  Node* value = node->InputAt(0);
+  return ObjectIsSmi(value);
+}
+
+Node* EffectControlLinearizer::LowerObjectIsString(Node* node) {
+  Node* value = node->InputAt(0);
+
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch = graph()->NewNode(common()->Branch(), check, control);
+  __ GotoIf(check, &if_smi);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_instance_type =
+      __ LoadField(AccessBuilder::ForMapInstanceType(), value_map);
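+  // String instance types are exactly the ones below FIRST_NONSTRING_TYPE.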
+  Node* vfalse = __ Uint32LessThan(value_instance_type,
+                                   __ Uint32Constant(FIRST_NONSTRING_TYPE));
+  __ Goto(&done, vfalse);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(1);
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    vfalse = graph()->NewNode(machine()->WordEqual(), value_map,
-                              jsgraph()->HeapNumberMapConstant());
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsReceiver(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerObjectIsUndetectable(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_smi = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kBit);
+
   Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  __ GotoIf(check, &if_smi);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
+  Node* value_map = __ LoadField(AccessBuilder::ForMap(), value);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForMapBitField(), value_map);
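+  // Extract the kIsUndetectable bit and normalize it to a 0/1 bit value.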
+  Node* vfalse = __ Word32Equal(
+      __ Word32Equal(__ Int32Constant(0),
+                     __ Word32And(value_bit_field,
+                                  __ Int32Constant(1 << Map::kIsUndetectable))),
+      __ Int32Constant(0));
+  __ Goto(&done, vfalse);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_instance_type = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->Uint32LessThanOrEqual(),
-                              jsgraph()->Uint32Constant(FIRST_JS_RECEIVER_TYPE),
-                              value_instance_type);
-  }
+  __ Bind(&if_smi);
+  __ Goto(&done, __ Int32Constant(0));
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsSmi(Node* node, Node* effect,
-                                          Node* control) {
-  Node* value = node->InputAt(0);
-  value = ObjectIsSmi(value);
-  return ValueEffectControl(value, effect, control);
+Node* EffectControlLinearizer::LowerNewRestParameterElements(Node* node) {
+  int const formal_parameter_count = ParameterCountOf(node->op());
+
+  Callable const callable = CodeFactory::NewRestParameterElements(isolate());
+  Operator::Properties const properties = node->op()->properties();
+  CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()),
+                 __ IntPtrConstant(formal_parameter_count),
+                 __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsString(Node* node, Node* effect,
-                                             Node* control) {
+Node* EffectControlLinearizer::LowerNewUnmappedArgumentsElements(Node* node) {
+  int const formal_parameter_count = ParameterCountOf(node->op());
+
+  Callable const callable =
+      CodeFactory::NewUnmappedArgumentsElements(isolate());
+  Operator::Properties const properties = node->op()->properties();
+  CallDescriptor::Flags const flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()),
+                 __ IntPtrConstant(formal_parameter_count),
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_instance_type = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapInstanceType()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(machine()->Uint32LessThan(), value_instance_type,
-                              jsgraph()->Uint32Constant(FIRST_NONSTRING_TYPE));
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* value_bit_field =
+      __ LoadField(AccessBuilder::ForJSArrayBufferBitField(), value);
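+  // Test the WasNeutered bit field and normalize it to a 0/1 bit value.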
+  return __ Word32Equal(
+      __ Word32Equal(
+          __ Word32And(value_bit_field,
+                       __ Int32Constant(JSArrayBuffer::WasNeutered::kMask)),
+          __ Int32Constant(0)),
+      __ Int32Constant(0));
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerObjectIsUndetectable(Node* node, Node* effect,
-                                                   Node* control) {
+Node* EffectControlLinearizer::LowerStringCharAt(Node* node) {
+  Node* receiver = node->InputAt(0);
+  Node* position = node->InputAt(1);
+
+  Callable const callable = CodeFactory::StringCharAt(isolate());
+  Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringCharCodeAt(Node* node) {
+  Node* receiver = node->InputAt(0);
+  Node* position = node->InputAt(1);
+
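+  // Delegate to the StringCharCodeAt stub, which handles the string
+  // representation dispatch previously done inline here.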
+  Callable const callable = CodeFactory::StringCharCodeAt(isolate());
+  Operator::Properties properties = Operator::kNoThrow | Operator::kNoWrite;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties,
+      MachineType::TaggedSigned());
+  return __ Call(desc, __ HeapConstant(callable.code()), receiver, position,
+                 __ NoContextConstant());
+}
+
+Node* EffectControlLinearizer::LowerStringFromCharCode(Node* node) {
   Node* value = node->InputAt(0);
 
-  Node* check = ObjectIsSmi(value);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
-
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = jsgraph()->Int32Constant(0);
-
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    Node* value_map = efalse =
-        graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                         value, efalse, if_false);
-    Node* value_bit_field = efalse = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForMapBitField()), value_map,
-        efalse, if_false);
-    vfalse = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(
-            machine()->Word32Equal(), jsgraph()->Int32Constant(0),
-            graph()->NewNode(
-                machine()->Word32And(), value_bit_field,
-                jsgraph()->Int32Constant(1 << Map::kIsUndetectable))),
-        jsgraph()->Int32Constant(0));
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kBit, 2), vtrue,
-                           vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerArrayBufferWasNeutered(Node* node, Node* effect,
-                                                     Node* control) {
-  Node* value = node->InputAt(0);
-
-  Node* value_bit_field = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForJSArrayBufferBitField()), value,
-      effect, control);
-  value = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Word32Equal(),
-                       graph()->NewNode(machine()->Word32And(), value_bit_field,
-                                        jsgraph()->Int32Constant(
-                                            JSArrayBuffer::WasNeutered::kMask)),
-                       jsgraph()->Int32Constant(0)),
-      jsgraph()->Int32Constant(0));
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringCharCodeAt(Node* node, Node* effect,
-                                               Node* control) {
-  Node* subject = node->InputAt(0);
-  Node* index = node->InputAt(1);
-
-  // We may need to loop several times for ConsString/SlicedString {subject}s.
-  Node* loop =
-      graph()->NewNode(common()->Loop(4), control, control, control, control);
-  Node* lsubject =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 4),
-                       subject, subject, subject, subject, loop);
-  Node* lindex =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 4), index,
-                       index, index, index, loop);
-  Node* leffect = graph()->NewNode(common()->EffectPhi(4), effect, effect,
-                                   effect, effect, loop);
-
-  control = loop;
-  effect = leffect;
-
-  // Determine the instance type of {lsubject}.
-  Node* lsubject_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       lsubject, effect, control);
-  Node* lsubject_instance_type = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForMapInstanceType()),
-      lsubject_map, effect, control);
-
-  // Check if {lsubject} is a SeqString.
-  Node* check0 = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                       jsgraph()->Int32Constant(kStringRepresentationMask)),
-      jsgraph()->Int32Constant(kSeqStringTag));
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    // Check if the {lsubject} is a TwoByteSeqString or a OneByteSeqString.
-    Node* check1 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                         jsgraph()->Int32Constant(kStringEncodingMask)),
-        jsgraph()->Int32Constant(kTwoByteStringTag));
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1 = etrue1 =
-        graph()->NewNode(simplified()->LoadElement(
-                             AccessBuilder::ForSeqTwoByteStringCharacter()),
-                         lsubject, lindex, etrue1, if_true1);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = etrue0;
-    Node* vfalse1 = efalse1 =
-        graph()->NewNode(simplified()->LoadElement(
-                             AccessBuilder::ForSeqOneByteStringCharacter()),
-                         lsubject, lindex, efalse1, if_false1);
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                              vtrue1, vfalse1, if_true0);
-  }
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    // Check if the {lsubject} is a ConsString.
-    Node* check1 = graph()->NewNode(
-        machine()->Word32Equal(),
-        graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                         jsgraph()->Int32Constant(kStringRepresentationMask)),
-        jsgraph()->Int32Constant(kConsStringTag));
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    {
-      // Load the right hand side of the {lsubject} ConsString.
-      Node* lsubject_second = etrue1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForConsStringSecond()),
-          lsubject, etrue1, if_true1);
-
-      // Check whether the right hand side is the empty string (i.e. if
-      // this is really a flat string in a cons string). If that is not
-      // the case we flatten the string first.
-      Node* check2 = graph()->NewNode(machine()->WordEqual(), lsubject_second,
-                                      jsgraph()->EmptyStringConstant());
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                       check2, if_true1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = etrue1;
-      Node* vtrue2 = etrue2 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForConsStringFirst()),
-          lsubject, etrue2, if_true2);
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = etrue1;
-      Node* vfalse2;
-      {
-        // Flatten the {lsubject} ConsString first.
-        Operator::Properties properties =
-            Operator::kNoDeopt | Operator::kNoThrow;
-        Runtime::FunctionId id = Runtime::kFlattenString;
-        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-            graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
-        vfalse2 = efalse2 = graph()->NewNode(
-            common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
-            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-            jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(),
-            efalse2, if_false2);
-      }
-
-      // Retry the {loop} with the new subject.
-      loop->ReplaceInput(1, if_true2);
-      lindex->ReplaceInput(1, lindex);
-      leffect->ReplaceInput(1, etrue2);
-      lsubject->ReplaceInput(1, vtrue2);
-      loop->ReplaceInput(2, if_false2);
-      lindex->ReplaceInput(2, lindex);
-      leffect->ReplaceInput(2, efalse2);
-      lsubject->ReplaceInput(2, vfalse2);
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      // Check if the {lsubject} is an ExternalString.
-      Node* check2 = graph()->NewNode(
-          machine()->Word32Equal(),
-          graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                           jsgraph()->Int32Constant(kStringRepresentationMask)),
-          jsgraph()->Int32Constant(kExternalStringTag));
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                       check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = efalse1;
-      Node* vtrue2;
-      {
-        // Check if the {lsubject} is a short external string.
-        Node* check3 = graph()->NewNode(
-            machine()->Word32Equal(),
-            graph()->NewNode(
-                machine()->Word32And(), lsubject_instance_type,
-                jsgraph()->Int32Constant(kShortExternalStringMask)),
-            jsgraph()->Int32Constant(0));
-        Node* branch3 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
-                                         check3, if_true2);
-
-        Node* if_true3 = graph()->NewNode(common()->IfTrue(), branch3);
-        Node* etrue3 = etrue2;
-        Node* vtrue3;
-        {
-          // Load the actual resource data from the {lsubject}.
-          Node* lsubject_resource_data = etrue3 = graph()->NewNode(
-              simplified()->LoadField(
-                  AccessBuilder::ForExternalStringResourceData()),
-              lsubject, etrue3, if_true3);
-
-          // Check if the {lsubject} is a TwoByteExternalString or a
-          // OneByteExternalString.
-          Node* check4 = graph()->NewNode(
-              machine()->Word32Equal(),
-              graph()->NewNode(machine()->Word32And(), lsubject_instance_type,
-                               jsgraph()->Int32Constant(kStringEncodingMask)),
-              jsgraph()->Int32Constant(kTwoByteStringTag));
-          Node* branch4 =
-              graph()->NewNode(common()->Branch(), check4, if_true3);
-
-          Node* if_true4 = graph()->NewNode(common()->IfTrue(), branch4);
-          Node* etrue4 = etrue3;
-          Node* vtrue4 = etrue4 = graph()->NewNode(
-              simplified()->LoadElement(
-                  AccessBuilder::ForExternalTwoByteStringCharacter()),
-              lsubject_resource_data, lindex, etrue4, if_true4);
-
-          Node* if_false4 = graph()->NewNode(common()->IfFalse(), branch4);
-          Node* efalse4 = etrue3;
-          Node* vfalse4 = efalse4 = graph()->NewNode(
-              simplified()->LoadElement(
-                  AccessBuilder::ForExternalOneByteStringCharacter()),
-              lsubject_resource_data, lindex, efalse4, if_false4);
-
-          if_true3 = graph()->NewNode(common()->Merge(2), if_true4, if_false4);
-          etrue3 = graph()->NewNode(common()->EffectPhi(2), etrue4, efalse4,
-                                    if_true3);
-          vtrue3 =
-              graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue4, vfalse4, if_true3);
-        }
-
-        Node* if_false3 = graph()->NewNode(common()->IfFalse(), branch3);
-        Node* efalse3 = etrue2;
-        Node* vfalse3;
-        {
-          // The {lsubject} might be compressed, call the runtime.
-          Operator::Properties properties =
-              Operator::kNoDeopt | Operator::kNoThrow;
-          Runtime::FunctionId id = Runtime::kExternalStringGetChar;
-          CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-              graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
-          vfalse3 = efalse3 = graph()->NewNode(
-              common()->Call(desc), jsgraph()->CEntryStubConstant(1), lsubject,
-              ChangeInt32ToSmi(lindex),
-              jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-              jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(),
-              efalse3, if_false3);
-          vfalse3 = ChangeSmiToInt32(vfalse3);
-        }
-
-        if_true2 = graph()->NewNode(common()->Merge(2), if_true3, if_false3);
-        etrue2 =
-            graph()->NewNode(common()->EffectPhi(2), etrue3, efalse3, if_true2);
-        vtrue2 =
-            graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                             vtrue3, vfalse3, if_true2);
-      }
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = efalse1;
-      {
-        // The {lsubject} is a SlicedString, continue with its parent.
-        Node* lsubject_parent = efalse2 = graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForSlicedStringParent()),
-            lsubject, efalse2, if_false2);
-        Node* lsubject_offset = efalse2 = graph()->NewNode(
-            simplified()->LoadField(AccessBuilder::ForSlicedStringOffset()),
-            lsubject, efalse2, if_false2);
-        Node* lsubject_index = graph()->NewNode(
-            machine()->Int32Add(), lindex, ChangeSmiToInt32(lsubject_offset));
-
-        // Retry the {loop} with the parent subject.
-        loop->ReplaceInput(3, if_false2);
-        leffect->ReplaceInput(3, efalse2);
-        lindex->ReplaceInput(3, lsubject_index);
-        lsubject->ReplaceInput(3, lsubject_parent);
-      }
-
-      if_false1 = if_true2;
-      efalse1 = etrue2;
-      vfalse1 = vtrue2;
-    }
-
-    if_false0 = if_false1;
-    efalse0 = efalse1;
-    vfalse0 = vfalse1;
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2), vtrue0,
-                       vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCharCode(Node* node, Node* effect,
-                                                 Node* control) {
-  Node* value = node->InputAt(0);
+  auto runtime_call = __ MakeDeferredLabel<2>();
+  auto if_undefined = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
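+  // Labels replace the explicit Branch/Merge/EffectPhi wiring: a deferred
+  // label marks its target block as unlikely, and a label created with a
+  // representation carries a value phi whose inputs come from the values
+  // passed to Goto/GotoIf/GotoUnless (read back via PhiAt(0)).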
 
   // Compute the character code.
-  Node* code =
-      graph()->NewNode(machine()->Word32And(), value,
-                       jsgraph()->Int32Constant(String::kMaxUtf16CodeUnit));
+  Node* code = __ Word32And(value, __ Int32Constant(String::kMaxUtf16CodeUnit));
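+  // (String.fromCharCode applies ToUint16, so masking with 0xFFFF, which is
+  // String::kMaxUtf16CodeUnit, yields the UTF-16 code unit.)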
 
   // Check if the {code} is a one-byte char code.
-  Node* check0 =
-      graph()->NewNode(machine()->Int32LessThanOrEqual(), code,
-                       jsgraph()->Int32Constant(String::kMaxOneByteCharCode));
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
+  Node* check0 = __ Int32LessThanOrEqual(
+      code, __ Int32Constant(String::kMaxOneByteCharCode));
+  __ GotoUnless(check0, &runtime_call);
 
   // Load the isolate wide single character string cache.
-  Node* cache =
-      jsgraph()->HeapConstant(factory()->single_character_string_cache());
+  Node* cache = __ HeapConstant(factory()->single_character_string_cache());
 
   // Compute the {cache} index for {code}.
-  Node* index = machine()->Is32()
-                    ? code
-                    : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+  Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
 
   // Check if we have an entry for the {code} in the single character string
   // cache already.
-  Node* entry = etrue0 = graph()->NewNode(
-      simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()), cache,
-      index, etrue0, if_true0);
+  Node* entry =
+      __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-  Node* check1 = graph()->NewNode(machine()->WordEqual(), entry,
-                                  jsgraph()->UndefinedConstant());
-  Node* branch1 =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check1, if_true0);
-
-  // Use the {entry} from the {cache}.
-  Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-  Node* efalse1 = etrue0;
-  Node* vfalse1 = entry;
+  Node* check1 = __ WordEqual(entry, __ UndefinedConstant());
+  __ GotoIf(check1, &runtime_call);
+  __ Goto(&done, entry);
 
   // Let %StringFromCharCode handle this case.
   // TODO(turbofan): At some point we may consider adding a stub for this
   // deferred case, so that we don't need to call to C++ here.
-  Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-  Node* etrue1 = etrue0;
-  Node* vtrue1;
+  __ Bind(&runtime_call);
   {
-    if_true1 = graph()->NewNode(common()->Merge(2), if_true1, if_false0);
-    etrue1 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse0, if_true1);
     Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
     Runtime::FunctionId id = Runtime::kStringCharFromCode;
     CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
         graph()->zone(), id, 1, properties, CallDescriptor::kNoFlags);
-    vtrue1 = etrue1 = graph()->NewNode(
-        common()->Call(desc), jsgraph()->CEntryStubConstant(1),
-        ChangeInt32ToSmi(code),
-        jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-        jsgraph()->Int32Constant(1), jsgraph()->NoContextConstant(), etrue1,
-        if_true1);
+    Node* vtrue1 =
+        __ Call(desc, __ CEntryStubConstant(1), ChangeInt32ToSmi(code),
+                __ ExternalConstant(ExternalReference(id, isolate())),
+                __ Int32Constant(1), __ NoContextConstant());
+    __ Goto(&done, vtrue1);
   }
-
-  control = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue1, vfalse1, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringFromCodePoint(Node* node, Node* effect,
-                                                  Node* control) {
+Node* EffectControlLinearizer::LowerStringFromCodePoint(Node* node) {
   Node* value = node->InputAt(0);
   Node* code = value;
 
-  Node* etrue0 = effect;
-  Node* vtrue0;
+  auto if_not_single_code = __ MakeDeferredLabel<1>();
+  auto if_not_one_byte = __ MakeDeferredLabel<1>();
+  auto cache_miss = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<4>(MachineRepresentation::kTagged);
 
   // Check if the {code} is a single code unit
-  Node* check0 = graph()->NewNode(machine()->Uint32LessThanOrEqual(), code,
-                                  jsgraph()->Uint32Constant(0xFFFF));
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  Node* check0 = __ Uint32LessThanOrEqual(code, __ Uint32Constant(0xFFFF));
+  __ GotoUnless(check0, &if_not_single_code);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
   {
     // Check if the {code} is a one byte character
-    Node* check1 = graph()->NewNode(
-        machine()->Uint32LessThanOrEqual(), code,
-        jsgraph()->Uint32Constant(String::kMaxOneByteCharCode));
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1;
+    Node* check1 = __ Uint32LessThanOrEqual(
+        code, __ Uint32Constant(String::kMaxOneByteCharCode));
+    __ GotoUnless(check1, &if_not_one_byte);
     {
       // Load the isolate wide single character string cache.
-      Node* cache =
-          jsgraph()->HeapConstant(factory()->single_character_string_cache());
+      Node* cache =
+          __ HeapConstant(factory()->single_character_string_cache());
 
       // Compute the {cache} index for {code}.
-      Node* index =
-          machine()->Is32()
-              ? code
-              : graph()->NewNode(machine()->ChangeUint32ToUint64(), code);
+      Node* index = machine()->Is32() ? code : __ ChangeUint32ToUint64(code);
 
       // Check if we have an entry for the {code} in the single character string
       // cache already.
-      Node* entry = etrue1 = graph()->NewNode(
-          simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
-          cache, index, etrue1, if_true1);
+      Node* entry =
+          __ LoadElement(AccessBuilder::ForFixedArrayElement(), cache, index);
 
-      Node* check2 = graph()->NewNode(machine()->WordEqual(), entry,
-                                      jsgraph()->UndefinedConstant());
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_true1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* etrue2 = etrue1;
-      Node* vtrue2;
-      {
-        // Allocate a new SeqOneByteString for {code}.
-        vtrue2 = etrue2 = graph()->NewNode(
-            simplified()->Allocate(NOT_TENURED),
-            jsgraph()->Int32Constant(SeqOneByteString::SizeFor(1)), etrue2,
-            if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForMap()), vtrue2,
-            jsgraph()->HeapConstant(factory()->one_byte_string_map()), etrue2,
-            if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForNameHashField()), vtrue2,
-            jsgraph()->IntPtrConstant(Name::kEmptyHashField), etrue2, if_true2);
-        etrue2 = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForStringLength()), vtrue2,
-            jsgraph()->SmiConstant(1), etrue2, if_true2);
-        etrue2 = graph()->NewNode(
-            machine()->Store(StoreRepresentation(MachineRepresentation::kWord8,
-                                                 kNoWriteBarrier)),
-            vtrue2, jsgraph()->IntPtrConstant(SeqOneByteString::kHeaderSize -
-                                              kHeapObjectTag),
-            code, etrue2, if_true2);
-
-        // Remember it in the {cache}.
-        etrue2 = graph()->NewNode(
-            simplified()->StoreElement(AccessBuilder::ForFixedArrayElement()),
-            cache, index, vtrue2, etrue2, if_true2);
-      }
+      Node* check2 = __ WordEqual(entry, __ UndefinedConstant());
+      __ GotoIf(check2, &cache_miss);
 
       // Use the {entry} from the {cache}.
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* efalse2 = etrue0;
-      Node* vfalse2 = entry;
+      __ Goto(&done, entry);
 
-      if_true1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      etrue1 =
-          graph()->NewNode(common()->EffectPhi(2), etrue2, efalse2, if_true1);
-      vtrue1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue2, vfalse2, if_true1);
+      __ Bind(&cache_miss);
+      {
+        // Allocate a new SeqOneByteString for {code}.
+        Node* vtrue2 = __ Allocate(
+            NOT_TENURED, __ Int32Constant(SeqOneByteString::SizeFor(1)));
+        __ StoreField(AccessBuilder::ForMap(), vtrue2,
+                      __ HeapConstant(factory()->one_byte_string_map()));
+        __ StoreField(AccessBuilder::ForNameHashField(), vtrue2,
+                      __ IntPtrConstant(Name::kEmptyHashField));
+        __ StoreField(AccessBuilder::ForStringLength(), vtrue2,
+                      __ SmiConstant(1));
+        __ Store(
+            StoreRepresentation(MachineRepresentation::kWord8, kNoWriteBarrier),
+            vtrue2,
+            __ IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+            code);
+
+        // Remember it in the {cache}.
+        __ StoreElement(AccessBuilder::ForFixedArrayElement(), cache, index,
+                        vtrue2);
+        __ Goto(&done, vtrue2);
+      }
     }
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = effect;
-    Node* vfalse1;
+    __ Bind(&if_not_one_byte);
     {
       // Allocate a new SeqTwoByteString for {code}.
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->Allocate(NOT_TENURED),
-          jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(1)), efalse1,
-          if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForMap()), vfalse1,
-          jsgraph()->HeapConstant(factory()->string_map()), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse1,
-          jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse1,
-          jsgraph()->SmiConstant(1), efalse1, if_false1);
-      efalse1 = graph()->NewNode(
-          machine()->Store(StoreRepresentation(MachineRepresentation::kWord16,
-                                               kNoWriteBarrier)),
-          vfalse1, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                             kHeapObjectTag),
-          code, efalse1, if_false1);
+      Node* vfalse1 = __ Allocate(
+          NOT_TENURED, __ Int32Constant(SeqTwoByteString::SizeFor(1)));
+      __ StoreField(AccessBuilder::ForMap(), vfalse1,
+                    __ HeapConstant(factory()->string_map()));
+      __ StoreField(AccessBuilder::ForNameHashField(), vfalse1,
+                    __ IntPtrConstant(Name::kEmptyHashField));
+      __ StoreField(AccessBuilder::ForStringLength(), vfalse1,
+                    __ SmiConstant(1));
+      __ Store(
+          StoreRepresentation(MachineRepresentation::kWord16, kNoWriteBarrier),
+          vfalse1,
+          __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+          code);
+      __ Goto(&done, vfalse1);
     }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              vtrue1, vfalse1, if_true0);
   }
 
+  __ Bind(&if_not_single_code);
   // Generate a surrogate pair string.
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
   {
     switch (UnicodeEncodingOf(node->op())) {
       case UnicodeEncoding::UTF16:
@@ -2666,553 +2044,373 @@
 
       case UnicodeEncoding::UTF32: {
         // Convert UTF32 to UTF16 code units, and store as a 32 bit word.
-        Node* lead_offset = jsgraph()->Int32Constant(0xD800 - (0x10000 >> 10));
+        Node* lead_offset = __ Int32Constant(0xD800 - (0x10000 >> 10));
 
         // lead = (codepoint >> 10) + LEAD_OFFSET
         Node* lead =
-            graph()->NewNode(machine()->Int32Add(),
-                             graph()->NewNode(machine()->Word32Shr(), code,
-                                              jsgraph()->Int32Constant(10)),
-                             lead_offset);
+            __ Int32Add(__ Word32Shr(code, __ Int32Constant(10)), lead_offset);
 
         // trail = (codepoint & 0x3FF) + 0xDC00;
-        Node* trail =
-            graph()->NewNode(machine()->Int32Add(),
-                             graph()->NewNode(machine()->Word32And(), code,
-                                              jsgraph()->Int32Constant(0x3FF)),
-                             jsgraph()->Int32Constant(0xDC00));
+        Node* trail = __ Int32Add(__ Word32And(code, __ Int32Constant(0x3FF)),
+                                  __ Int32Constant(0xDC00));
 
         // codepoint = (trail << 16) | lead;
-        code = graph()->NewNode(machine()->Word32Or(),
-                                graph()->NewNode(machine()->Word32Shl(), trail,
-                                                 jsgraph()->Int32Constant(16)),
-                                lead);
+        code = __ Word32Or(__ Word32Shl(trail, __ Int32Constant(16)), lead);
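+        // For example U+1F600: lead = (0x1F600 >> 10) + 0xD7C0 = 0xD83D and
+        // trail = (0x1F600 & 0x3FF) + 0xDC00 = 0xDE00, so the stored word is
+        // 0xDE00D83D with the lead surrogate in the low 16 bits.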
         break;
       }
     }
 
     // Allocate a new SeqTwoByteString for {code}.
-    vfalse0 = efalse0 =
-        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
-                         jsgraph()->Int32Constant(SeqTwoByteString::SizeFor(2)),
-                         efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForMap()), vfalse0,
-        jsgraph()->HeapConstant(factory()->string_map()), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForNameHashField()), vfalse0,
-        jsgraph()->IntPtrConstant(Name::kEmptyHashField), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        simplified()->StoreField(AccessBuilder::ForStringLength()), vfalse0,
-        jsgraph()->SmiConstant(2), efalse0, if_false0);
-    efalse0 = graph()->NewNode(
-        machine()->Store(StoreRepresentation(MachineRepresentation::kWord32,
-                                             kNoWriteBarrier)),
-        vfalse0, jsgraph()->IntPtrConstant(SeqTwoByteString::kHeaderSize -
-                                           kHeapObjectTag),
-        code, efalse0, if_false0);
+    Node* vfalse0 = __ Allocate(NOT_TENURED,
+                                __ Int32Constant(SeqTwoByteString::SizeFor(2)));
+    __ StoreField(AccessBuilder::ForMap(), vfalse0,
+                  __ HeapConstant(factory()->string_map()));
+    __ StoreField(AccessBuilder::ForNameHashField(), vfalse0,
+                  __ IntPtrConstant(Name::kEmptyHashField));
+    __ StoreField(AccessBuilder::ForStringLength(), vfalse0, __ SmiConstant(2));
+    __ Store(
+        StoreRepresentation(MachineRepresentation::kWord32, kNoWriteBarrier),
+        vfalse0,
+        __ IntPtrConstant(SeqTwoByteString::kHeaderSize - kHeapObjectTag),
+        code);
+    __ Goto(&done, vfalse0);
   }
 
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue0, vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringComparison(Callable const& callable,
-                                               Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerStringIndexOf(Node* node) {
+  Node* subject = node->InputAt(0);
+  Node* search_string = node->InputAt(1);
+  Node* position = node->InputAt(2);
+
+  Callable callable = CodeFactory::StringIndexOf(isolate());
   Operator::Properties properties = Operator::kEliminatable;
   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
-  node->InsertInput(graph()->zone(), 0,
-                    jsgraph()->HeapConstant(callable.code()));
-  node->AppendInput(graph()->zone(), jsgraph()->NoContextConstant());
-  node->AppendInput(graph()->zone(), effect);
-  NodeProperties::ChangeOp(node, common()->Call(desc));
-  return ValueEffectControl(node, node, control);
+  return __ Call(desc, __ HeapConstant(callable.code()), subject, search_string,
+                 position, __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringEqual(Node* node, Node* effect,
-                                          Node* control) {
-  return LowerStringComparison(CodeFactory::StringEqual(isolate()), node,
-                               effect, control);
+Node* EffectControlLinearizer::LowerStringComparison(Callable const& callable,
+                                                     Node* node) {
+  Node* lhs = node->InputAt(0);
+  Node* rhs = node->InputAt(1);
+
+  Operator::Properties properties = Operator::kEliminatable;
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  return __ Call(desc, __ HeapConstant(callable.code()), lhs, rhs,
+                 __ NoContextConstant());
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThan(Node* node, Node* effect,
-                                             Node* control) {
-  return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node,
-                               effect, control);
+Node* EffectControlLinearizer::LowerStringEqual(Node* node) {
+  return LowerStringComparison(CodeFactory::StringEqual(isolate()), node);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node, Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerStringLessThan(Node* node) {
+  return LowerStringComparison(CodeFactory::StringLessThan(isolate()), node);
+}
+
+Node* EffectControlLinearizer::LowerStringLessThanOrEqual(Node* node) {
   return LowerStringComparison(CodeFactory::StringLessThanOrEqual(isolate()),
-                               node, effect, control);
+                               node);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckFloat64Hole(Node* node, Node* frame_state,
-                                               Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckFloat64Hole(Node* node,
+                                                     Node* frame_state) {
   // If we reach this point w/o eliminating the {node} that's marked
   // with allow-return-hole, we cannot do anything, so just deoptimize
   // in case of the hole NaN (similar to Crankshaft).
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(
-      machine()->Word32Equal(),
-      graph()->NewNode(machine()->Float64ExtractHighWord32(), value),
-      jsgraph()->Int32Constant(kHoleNanUpper32));
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
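+  // The hole is encoded as a NaN with a fixed bit pattern; since NaN never
+  // compares equal to anything, we identify it by its raw upper 32 bits.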
+  Node* check = __ Word32Equal(__ Float64ExtractHighWord32(value),
+                               __ Int32Constant(kHoleNanUpper32));
+  __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerCheckTaggedHole(Node* node, Node* frame_state,
-                                              Node* effect, Node* control) {
+Node* EffectControlLinearizer::LowerCheckTaggedHole(Node* node,
+                                                    Node* frame_state) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->WordEqual(), value,
-                                 jsgraph()->TheHoleConstant());
-  control = effect =
-      graph()->NewNode(common()->DeoptimizeIf(DeoptimizeReason::kHole), check,
-                       frame_state, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  Node* check = __ WordEqual(value, __ TheHoleConstant());
+  __ DeoptimizeIf(DeoptimizeReason::kHole, check, frame_state);
+  return value;
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node,
-                                                           Node* effect,
-                                                           Node* control) {
+Node* EffectControlLinearizer::LowerConvertTaggedHoleToUndefined(Node* node) {
   Node* value = node->InputAt(0);
-  Node* check = graph()->NewNode(machine()->WordEqual(), value,
-                                 jsgraph()->TheHoleConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
 
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* vtrue = jsgraph()->UndefinedConstant();
+  auto if_is_hole = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
 
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* vfalse = value;
+  Node* check = __ WordEqual(value, __ TheHoleConstant());
+  __ GotoIf(check, &if_is_hole);
+  __ Goto(&done, value);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                           vtrue, vfalse, control);
+  __ Bind(&if_is_hole);
+  __ Goto(&done, __ UndefinedConstant());
 
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
-                                                     Node* control) {
-  Node* result = effect = graph()->NewNode(
-      simplified()->Allocate(NOT_TENURED),
-      jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
-  effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
-                            result, jsgraph()->HeapNumberMapConstant(), effect,
-                            control);
-  effect = graph()->NewNode(
-      simplified()->StoreField(AccessBuilder::ForHeapNumberValue()), result,
-      value, effect, control);
-  return ValueEffectControl(result, effect, control);
+Node* EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value) {
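+  // A HeapNumber is just a map word followed by an unboxed float64 payload,
+  // so the allocation needs only the map store and the value store.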
+  Node* result = __ Allocate(NOT_TENURED, __ Int32Constant(HeapNumber::kSize));
+  __ StoreField(AccessBuilder::ForMap(), result, __ HeapNumberMapConstant());
+  __ StoreField(AccessBuilder::ForHeapNumberValue(), result, value);
+  return result;
 }
 
 Node* EffectControlLinearizer::ChangeInt32ToSmi(Node* value) {
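+  // Smis keep the integer in the bits above the tag: the payload is shifted
+  // left by kSmiShiftSize + kSmiTagSize (32 on 64-bit targets, 1 on 32-bit
+  // ones), which leaves the low tag bit kSmiTag == 0 clear.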
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeInt32ToInt64(), value);
+    value = __ ChangeInt32ToInt64(value);
   }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
+  return __ WordShl(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeUint32ToSmi(Node* value) {
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->ChangeUint32ToUint64(), value);
+    value = __ ChangeUint32ToUint64(value);
   }
-  return graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant());
-}
-
-Node* EffectControlLinearizer::ChangeInt32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeInt32ToFloat64(), value);
-}
-
-Node* EffectControlLinearizer::ChangeUint32ToFloat64(Node* value) {
-  return graph()->NewNode(machine()->ChangeUint32ToFloat64(), value);
+  return __ WordShl(value, SmiShiftBitsConstant());
 }
 
 Node* EffectControlLinearizer::ChangeSmiToInt32(Node* value) {
-  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+  value = __ WordSar(value, SmiShiftBitsConstant());
   if (machine()->Is64()) {
-    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+    value = __ TruncateInt64ToInt32(value);
   }
   return value;
 }
+
 Node* EffectControlLinearizer::ObjectIsSmi(Node* value) {
-  return graph()->NewNode(
-      machine()->WordEqual(),
-      graph()->NewNode(machine()->WordAnd(), value,
-                       jsgraph()->IntPtrConstant(kSmiTagMask)),
-      jsgraph()->IntPtrConstant(kSmiTag));
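+  // Heap object pointers have the low tag bit set (kHeapObjectTag == 1)
+  // while Smis have it clear, so masking with kSmiTagMask is sufficient.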
+  return __ WordEqual(__ WordAnd(value, __ IntPtrConstant(kSmiTagMask)),
+                      __ IntPtrConstant(kSmiTag));
 }
 
 Node* EffectControlLinearizer::SmiMaxValueConstant() {
-  return jsgraph()->Int32Constant(Smi::kMaxValue);
+  return __ Int32Constant(Smi::kMaxValue);
 }
 
 Node* EffectControlLinearizer::SmiShiftBitsConstant() {
-  return jsgraph()->IntPtrConstant(kSmiShiftSize + kSmiTagSize);
+  return __ IntPtrConstant(kSmiShiftSize + kSmiTagSize);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToNumber(Node* node) {
   Node* value = node->InputAt(0);
-  Node* result = effect =
-      graph()->NewNode(ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(),
-                       value, jsgraph()->NoContextConstant(), effect);
-  return ValueEffectControl(result, effect, control);
+  return __ ToNumber(value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node, Node* effect,
-                                                     Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToWord32(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto if_to_number_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<3>(MachineRepresentation::kWord32);
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  __ Goto(&done, ChangeSmiToInt32(value));
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0 = ChangeSmiToInt32(value);
+  __ Bind(&if_not_smi);
+  Node* to_number = __ ToNumber(value);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    vfalse0 = efalse0 = graph()->NewNode(
-        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0);
+  Node* check1 = ObjectIsSmi(to_number);
+  __ GotoIf(check1, &if_to_number_smi);
+  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
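+  // TruncateFloat64ToWord32 implements the ECMAScript ToInt32 truncation,
+  // i.e. the value modulo 2^32 with NaN and +/-Infinity mapped to zero.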
+  __ Goto(&done, __ TruncateFloat64ToWord32(number));
 
-    Node* check1 = ObjectIsSmi(vfalse0);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_to_number_smi);
+  __ Goto(&done, ChangeSmiToInt32(to_number));
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    Node* vtrue1 = ChangeSmiToInt32(vfalse0);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
-          efalse1, if_false1);
-      vfalse1 = graph()->NewNode(machine()->TruncateFloat64ToWord32(), vfalse1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                               vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kWord32, 2),
-                           vtrue0, vfalse0, control);
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
-                                                      Node* control) {
+Node* EffectControlLinearizer::LowerPlainPrimitiveToFloat64(Node* node) {
   Node* value = node->InputAt(0);
 
+  auto if_not_smi = __ MakeDeferredLabel<1>();
+  auto if_to_number_smi = __ MakeLabel<1>();
+  auto done = __ MakeLabel<3>(MachineRepresentation::kFloat64);
+
   Node* check0 = ObjectIsSmi(value);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  __ GotoUnless(check0, &if_not_smi);
+  Node* from_smi = ChangeSmiToInt32(value);
+  __ Goto(&done, __ ChangeInt32ToFloat64(from_smi));
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0;
-  {
-    vtrue0 = ChangeSmiToInt32(value);
-    vtrue0 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue0);
-  }
+  __ Bind(&if_not_smi);
+  Node* to_number = __ ToNumber(value);
+  Node* check1 = ObjectIsSmi(to_number);
+  __ GotoIf(check1, &if_to_number_smi);
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0;
-  {
-    vfalse0 = efalse0 = graph()->NewNode(
-        ToNumberOperator(), jsgraph()->ToNumberBuiltinConstant(), value,
-        jsgraph()->NoContextConstant(), efalse0);
+  Node* number = __ LoadField(AccessBuilder::ForHeapNumberValue(), to_number);
+  __ Goto(&done, number);
 
-    Node* check1 = ObjectIsSmi(vfalse0);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_to_number_smi);
+  Node* number_from_smi = ChangeSmiToInt32(to_number);
+  number_from_smi = __ ChangeInt32ToFloat64(number_from_smi);
+  __ Goto(&done, number_from_smi);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = efalse0;
-    Node* vtrue1;
-    {
-      vtrue1 = ChangeSmiToInt32(vfalse0);
-      vtrue1 = graph()->NewNode(machine()->ChangeInt32ToFloat64(), vtrue1);
-    }
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = efalse0;
-    Node* vfalse1;
-    {
-      vfalse1 = efalse1 = graph()->NewNode(
-          simplified()->LoadField(AccessBuilder::ForHeapNumberValue()), efalse0,
-          efalse1, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    efalse0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_false0);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue0, vfalse0, control);
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node,
-                                                         Node* effect,
-                                                         Node* control) {
+Node* EffectControlLinearizer::LowerEnsureWritableFastElements(Node* node) {
   Node* object = node->InputAt(0);
   Node* elements = node->InputAt(1);
 
+  auto if_not_fixed_array = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+
   // Load the current map of {elements}.
-  Node* elements_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
-                       elements, effect, control);
+  Node* elements_map = __ LoadField(AccessBuilder::ForMap(), elements);
 
   // Check if {elements} is not a copy-on-write FixedArray.
-  Node* check = graph()->NewNode(machine()->WordEqual(), elements_map,
-                                 jsgraph()->FixedArrayMapConstant());
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
-
+  Node* check = __ WordEqual(elements_map, __ FixedArrayMapConstant());
+  __ GotoUnless(check, &if_not_fixed_array);
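+  // (A copy-on-write backing store uses fixed_cow_array_map rather than the
+  // plain fixed_array_map, so comparing maps is all the check needs.)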
   // Nothing to do if the {elements} are not copy-on-write.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  Node* vtrue = elements;
+  __ Goto(&done, elements);
 
+  __ Bind(&if_not_fixed_array);
   // We need to take a copy of the {elements} and set them up for {object}.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-  Node* vfalse;
-  {
-    // We need to create a copy of the {elements} for {object}.
-    Operator::Properties properties = Operator::kEliminatable;
-    Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
-    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-        properties);
-    vfalse = efalse = graph()->NewNode(
-        common()->Call(desc), jsgraph()->HeapConstant(callable.code()), object,
-        jsgraph()->NoContextConstant(), efalse);
-  }
+  Operator::Properties properties = Operator::kEliminatable;
+  Callable callable = CodeFactory::CopyFastSmiOrObjectElements(isolate());
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+      isolate(), graph()->zone(), callable.descriptor(), 0, flags, properties);
+  Node* result = __ Call(desc, __ HeapConstant(callable.code()), object,
+                         __ NoContextConstant());
+  __ Goto(&done, result);
 
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-  Node* value = graph()->NewNode(
-      common()->Phi(MachineRepresentation::kTagged, 2), vtrue, vfalse, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
-                                                    Node* frame_state,
-                                                    Node* effect,
-                                                    Node* control) {
+Node* EffectControlLinearizer::LowerMaybeGrowFastElements(Node* node,
+                                                          Node* frame_state) {
   GrowFastElementsFlags flags = GrowFastElementsFlagsOf(node->op());
   Node* object = node->InputAt(0);
   Node* elements = node->InputAt(1);
   Node* index = node->InputAt(2);
   Node* length = node->InputAt(3);
 
-  Node* check0 = graph()->NewNode((flags & GrowFastElementsFlag::kHoleyElements)
-                                      ? machine()->Uint32LessThanOrEqual()
-                                      : machine()->Word32Equal(),
-                                  length, index);
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+  auto done = __ MakeLabel<2>(MachineRepresentation::kTagged);
+  auto done_grow = __ MakeLabel<2>(MachineRepresentation::kTagged);
+  auto if_grow = __ MakeDeferredLabel<1>();
+  auto if_not_grow = __ MakeLabel<1>();
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* etrue0 = effect;
-  Node* vtrue0 = elements;
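+  // Holey elements may have to grow for any index at or beyond the current
+  // {length}; packed elements can only grow by appending at {index} ==
+  // {length}, and any other index is bounds-checked in the no-grow path.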
+  Node* check0 = (flags & GrowFastElementsFlag::kHoleyElements)
+                     ? __ Uint32LessThanOrEqual(length, index)
+                     : __ Word32Equal(length, index);
+  __ GotoUnless(check0, &if_not_grow);
   {
     // Load the length of the {elements} backing store.
-    Node* elements_length = etrue0 = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForFixedArrayLength()), elements,
-        etrue0, if_true0);
+    Node* elements_length =
+        __ LoadField(AccessBuilder::ForFixedArrayLength(), elements);
     elements_length = ChangeSmiToInt32(elements_length);
 
     // Check if we need to grow the {elements} backing store.
-    Node* check1 =
-        graph()->NewNode(machine()->Uint32LessThan(), index, elements_length);
-    Node* branch1 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check1, if_true0);
+    Node* check1 = __ Uint32LessThan(index, elements_length);
+    __ GotoUnless(check1, &if_grow);
+    __ Goto(&done_grow, elements);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* etrue1 = etrue0;
-    Node* vtrue1 = vtrue0;
+    __ Bind(&if_grow);
+    // We need to grow the {elements} for {object}.
+    Operator::Properties properties = Operator::kEliminatable;
+    Callable callable =
+        (flags & GrowFastElementsFlag::kDoubleElements)
+            ? CodeFactory::GrowFastDoubleElements(isolate())
+            : CodeFactory::GrowFastSmiOrObjectElements(isolate());
+    CallDescriptor::Flags call_flags = CallDescriptor::kNoFlags;
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), 0, call_flags,
+        properties);
+    Node* new_object = __ Call(desc, __ HeapConstant(callable.code()), object,
+                               ChangeInt32ToSmi(index), __ NoContextConstant());
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* efalse1 = etrue0;
-    Node* vfalse1 = vtrue0;
-    {
-      // We need to grow the {elements} for {object}.
-      Operator::Properties properties = Operator::kEliminatable;
-      Callable callable =
-          (flags & GrowFastElementsFlag::kDoubleElements)
-              ? CodeFactory::GrowFastDoubleElements(isolate())
-              : CodeFactory::GrowFastSmiOrObjectElements(isolate());
-      CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-      CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
-          isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-          properties);
-      vfalse1 = efalse1 = graph()->NewNode(
-          common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-          object, ChangeInt32ToSmi(index), jsgraph()->NoContextConstant(),
-          efalse1);
+    // Ensure that we were able to grow the {elements}.
+    // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
+    // but maybe we should just introduce a reason that makes sense.
+    __ DeoptimizeIf(DeoptimizeReason::kSmi, ObjectIsSmi(new_object),
+                    frame_state);
+    __ Goto(&done_grow, new_object);
 
-      // Ensure that we were able to grow the {elements}.
-      // TODO(turbofan): We use kSmi as reason here similar to Crankshaft,
-      // but maybe we should just introduce a reason that makes sense.
-      efalse1 = if_false1 = graph()->NewNode(
-          common()->DeoptimizeIf(DeoptimizeReason::kSmi), ObjectIsSmi(vfalse1),
-          frame_state, efalse1, if_false1);
-    }
-
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    etrue0 =
-        graph()->NewNode(common()->EffectPhi(2), etrue1, efalse1, if_true0);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&done_grow);
 
     // For JSArray {object}s we also need to update the "length".
     if (flags & GrowFastElementsFlag::kArrayObject) {
       // Compute the new {length}.
-      Node* object_length = ChangeInt32ToSmi(graph()->NewNode(
-          machine()->Int32Add(), index, jsgraph()->Int32Constant(1)));
+      Node* object_length =
+          ChangeInt32ToSmi(__ Int32Add(index, __ Int32Constant(1)));
 
       // Update the "length" property of the {object}.
-      etrue0 =
-          graph()->NewNode(simplified()->StoreField(
-                               AccessBuilder::ForJSArrayLength(FAST_ELEMENTS)),
-                           object, object_length, etrue0, if_true0);
+      __ StoreField(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), object,
+                    object_length);
     }
+    __ Goto(&done, done_grow.PhiAt(0));
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* efalse0 = effect;
-  Node* vfalse0 = elements;
+  __ Bind(&if_not_grow);
   {
     // In case of non-holey {elements}, we need to verify that the {index} is
     // in bounds; for holey {elements}, the check above already guards the
     // index (and the operator forces {index} to be unsigned).
     if (!(flags & GrowFastElementsFlag::kHoleyElements)) {
-      Node* check1 =
-          graph()->NewNode(machine()->Uint32LessThan(), index, length);
-      efalse0 = if_false0 = graph()->NewNode(
-          common()->DeoptimizeUnless(DeoptimizeReason::kOutOfBounds), check1,
-          frame_state, efalse0, if_false0);
+      Node* check1 = __ Uint32LessThan(index, length);
+      __ DeoptimizeUnless(DeoptimizeReason::kOutOfBounds, check1, frame_state);
     }
+    __ Goto(&done, elements);
   }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue0, efalse0, control);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kTagged, 2), vtrue0,
-                       vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerTransitionElementsKind(Node* node, Node* effect,
-                                                     Node* control) {
+void EffectControlLinearizer::LowerTransitionElementsKind(Node* node) {
   ElementsTransition const transition = ElementsTransitionOf(node->op());
   Node* object = node->InputAt(0);
-  Node* source_map = node->InputAt(1);
-  Node* target_map = node->InputAt(2);
+
+  auto if_map_same = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<2>();
+
+  Node* source_map = __ HeapConstant(transition.source());
+  Node* target_map = __ HeapConstant(transition.target());
 
   // Load the current map of {object}.
-  Node* object_map = effect =
-      graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()), object,
-                       effect, control);
+  Node* object_map = __ LoadField(AccessBuilder::ForMap(), object);
 
   // Check if {object_map} is the same as {source_map}.
-  Node* check =
-      graph()->NewNode(machine()->WordEqual(), object_map, source_map);
-  Node* branch =
-      graph()->NewNode(common()->Branch(BranchHint::kFalse), check, control);
+  Node* check = __ WordEqual(object_map, source_map);
+  __ GotoIf(check, &if_map_same);
+  __ Goto(&done);
 
-  // Migrate the {object} from {source_map} to {target_map}.
-  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-  Node* etrue = effect;
-  {
-    switch (transition) {
-      case ElementsTransition::kFastTransition: {
-        // In-place migration of {object}, just store the {target_map}.
-        etrue =
-            graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
-                             object, target_map, etrue, if_true);
-        break;
-      }
-      case ElementsTransition::kSlowTransition: {
-        // Instance migration, call out to the runtime for {object}.
-        Operator::Properties properties =
-            Operator::kNoDeopt | Operator::kNoThrow;
-        Runtime::FunctionId id = Runtime::kTransitionElementsKind;
-        CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
-            graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
-        etrue = graph()->NewNode(
-            common()->Call(desc), jsgraph()->CEntryStubConstant(1), object,
-            target_map,
-            jsgraph()->ExternalConstant(ExternalReference(id, isolate())),
-            jsgraph()->Int32Constant(2), jsgraph()->NoContextConstant(), etrue,
-            if_true);
-        break;
-      }
+  __ Bind(&if_map_same);
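+  // A fast transition leaves the elements representation compatible, so it
+  // suffices to store the {target_map}; a slow transition (e.g. from Smi to
+  // double elements) must reallocate the backing store in the runtime.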
+  switch (transition.mode()) {
+    case ElementsTransition::kFastTransition:
+      // In-place migration of {object}, just store the {target_map}.
+      __ StoreField(AccessBuilder::ForMap(), object, target_map);
+      break;
+    case ElementsTransition::kSlowTransition: {
+      // Instance migration, call out to the runtime for {object}.
+      Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
+      Runtime::FunctionId id = Runtime::kTransitionElementsKind;
+      CallDescriptor const* desc = Linkage::GetRuntimeCallDescriptor(
+          graph()->zone(), id, 2, properties, CallDescriptor::kNoFlags);
+      __ Call(desc, __ CEntryStubConstant(1), object, target_map,
+              __ ExternalConstant(ExternalReference(id, isolate())),
+              __ Int32Constant(2), __ NoContextConstant());
+      break;
     }
   }
+  __ Goto(&done);
 
-  // Nothing to do if the {object} doesn't have the {source_map}.
-  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-  Node* efalse = effect;
-
-  control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-  effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-
-  return ValueEffectControl(nullptr, effect, control);
+  __ Bind(&done);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerLoadTypedElement(Node* node, Node* effect,
-                                               Node* control) {
+Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
   Node* buffer = node->InputAt(0);
   Node* base = node->InputAt(1);
@@ -3221,24 +2419,20 @@
 
   // We need to keep the {buffer} alive so that the GC will not release the
   // ArrayBuffer (if there's any) as long as we are still operating on it.
-  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+  __ Retain(buffer);
 
-  // Compute the effective storage pointer.
-  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
-                                            external, effect, control);
+  // Compute the effective storage pointer, handling the case where the
+  // {external} pointer is the effective storage pointer (i.e. the {base}
+  // is Smi zero).
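+  // (Off-heap typed arrays pass Smi zero as {base} and the absolute backing
+  // store address as {external}; on-heap ones pass the elements object as
+  // {base} and an offset as {external}.)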
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
 
   // Perform the actual typed element access.
-  Node* value = effect = graph()->NewNode(
-      simplified()->LoadElement(
-          AccessBuilder::ForTypedArrayElement(array_type, true)),
-      storage, index, effect, control);
-
-  return ValueEffectControl(value, effect, control);
+  return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+                        storage, index);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerStoreTypedElement(Node* node, Node* effect,
-                                                Node* control) {
+void EffectControlLinearizer::LowerStoreTypedElement(Node* node) {
   ExternalArrayType array_type = ExternalArrayTypeOf(node->op());
   Node* buffer = node->InputAt(0);
   Node* base = node->InputAt(1);
@@ -3248,34 +2442,25 @@
 
   // We need to keep the {buffer} alive so that the GC will not release the
   // ArrayBuffer (if there's any) as long as we are still operating on it.
-  effect = graph()->NewNode(common()->Retain(), buffer, effect);
+  __ Retain(buffer);
 
-  // Compute the effective storage pointer.
-  Node* storage = effect = graph()->NewNode(machine()->UnsafePointerAdd(), base,
-                                            external, effect, control);
+  // Compute the effective storage pointer, handling the case where the
+  // {external} pointer is the effective storage pointer (i.e. the {base}
+  // is Smi zero).
+  Node* storage = NumberMatcher(base).Is(0)
+                      ? external
+                      : __ UnsafePointerAdd(base, external);
 
   // Perform the actual typed element access.
-  effect = graph()->NewNode(
-      simplified()->StoreElement(
-          AccessBuilder::ForTypedArrayElement(array_type, true)),
-      storage, index, value, effect, control);
-
-  return ValueEffectControl(nullptr, effect, control);
+  __ StoreElement(AccessBuilder::ForTypedArrayElement(array_type, true),
+                  storage, index, value);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundUp(Node* node, Node* effect,
-                                             Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundUp(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundUp().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
   Node* const input = node->InputAt(0);
 
   // General case for ceil.
@@ -3300,251 +2485,169 @@
   //         let temp2 = (2^52 + temp1) - 2^52 in
   //         let temp3 = (if temp1 < temp2 then temp2 - 1 else temp2) in
   //         -0 - temp3
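+  //
+  // For example ceil(3.4): temp1 = (2^52 + 3.4) - 2^52 = 3.0, because adding
+  // 2^52 forces rounding at a ULP of 1; temp1 < input, so the result is
+  // temp1 + 1 = 4.0.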
+
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+  auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
+
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+  Node* const one = __ Float64Constant(1.0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
+  {
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
+    {
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(temp1, input), &done, temp1);
+      __ Goto(&done, __ Float64Add(temp1, one));
+    }
+
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
+  }
+
+  __ Bind(&if_not_positive);
+  {
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
+
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
+
+    {
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp1, temp2);
+      __ GotoUnless(check3, &done_temp3, temp2);
+      __ Goto(&done_temp3, __ Float64Sub(temp2, one));
+
+      __ Bind(&done_temp3);
+      Node* temp3 = done_temp3.PhiAt(0);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
+    }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
+
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
+  }
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
+}
+
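+// Builds floor(value). The optional Float64RoundDown machine operator yields
+// nullptr through the assembler when no fast instruction is available (see
+// the check below); in that case floor is composed from Float64 arithmetic.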
+Node* EffectControlLinearizer::BuildFloat64RoundDown(Node* value) {
+  Node* round_down = __ Float64RoundDown(value);
+  if (round_down != nullptr) {
+    return round_down;
+  }
+
+  Node* const input = value;
+
+  // General case for floor.
   //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
+  //   if 0.0 < input then
+  //     if 2^52 <= input then
+  //       input
+  //     else
+  //       let temp1 = (2^52 + input) - 2^52 in
+  //       if input < temp1 then
+  //         temp1 - 1
+  //       else
+  //         temp1
+  //   else
+  //     if input == 0 then
+  //       input
+  //     else
+  //       if input <= -2^52 then
+  //         input
+  //       else
+  //         let temp1 = -0 - input in
+  //         let temp2 = (2^52 + temp1) - 2^52 in
+  //         if temp2 < temp1 then
+  //           -1 - temp2
+  //         else
+  //           -0 - temp2
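+  //
+  // Worked example: for input = 2.5, temp1 = (2^52 + 2.5) - 2^52 = 2.0 (the
+  // tie rounds to the even value 2^52 + 2) and 2.5 < 2.0 is false, so the
+  // result is 2.0 = floor(2.5). For input = -2.5, temp1 = 2.5 and
+  // temp2 = 2.0, so temp2 < temp1 holds and the result is -1 - 2.0 = -3.0
+  // = floor(-2.5).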
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_temp2_lt_temp1 = __ MakeLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done = __ MakeLabel<7>(MachineRepresentation::kFloat64);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
   {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
     {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), temp1, input),
-          graph()->NewNode(machine()->Float64Add(), temp1, one), temp1);
+      Node* const one = __ Float64Constant(1.0);
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+      __ Goto(&done, __ Float64Sub(temp1, one));
     }
 
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
+  __ Bind(&if_not_positive);
   {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
     {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp2, temp1);
+      __ GotoIf(check3, &if_temp2_lt_temp1);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp2));
 
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
+      __ Bind(&if_temp2_lt_temp1);
+      __ Goto(&done, __ Float64Sub(__ Float64Constant(-1.0), temp2));
     }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
 
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
   }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                       vtrue0, vfalse0, merge0);
-  return ValueEffectControl(value, effect, merge0);
+  __ Bind(&done);
+  return done.PhiAt(0);
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::BuildFloat64RoundDown(Node* value, Node* effect,
-                                               Node* control) {
-  if (machine()->Float64RoundDown().IsSupported()) {
-    value = graph()->NewNode(machine()->Float64RoundDown().op(), value);
-  } else {
-    Node* const one = jsgraph()->Float64Constant(1.0);
-    Node* const zero = jsgraph()->Float64Constant(0.0);
-    Node* const minus_one = jsgraph()->Float64Constant(-1.0);
-    Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-    Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-    Node* const minus_two_52 =
-        jsgraph()->Float64Constant(-4503599627370496.0E0);
-    Node* const input = value;
-
-    // General case for floor.
-    //
-    //   if 0.0 < input then
-    //     if 2^52 <= input then
-    //       input
-    //     else
-    //       let temp1 = (2^52 + input) - 2^52 in
-    //       if input < temp1 then
-    //         temp1 - 1
-    //       else
-    //         temp1
-    //   else
-    //     if input == 0 then
-    //       input
-    //     else
-    //       if input <= -2^52 then
-    //         input
-    //       else
-    //         let temp1 = -0 - input in
-    //         let temp2 = (2^52 + temp1) - 2^52 in
-    //         if temp2 < temp1 then
-    //           -1 - temp2
-    //         else
-    //           -0 - temp2
-    //
-    // Note: We do not use the Diamond helper class here, because it really
-    // hurts
-    // readability with nested diamonds.
-
-    Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-    Node* branch0 =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
-
-    Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-    Node* vtrue0;
-    {
-      Node* check1 =
-          graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-      Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-      Node* vtrue1 = input;
-
-      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-      Node* vfalse1;
-      {
-        Node* temp1 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-        vfalse1 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-            graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
-      }
-
-      if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-      vtrue0 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue1, vfalse1, if_true0);
-    }
-
-    Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-    Node* vfalse0;
-    {
-      Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-      Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check1, if_false0);
-
-      Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-      Node* vtrue1 = input;
-
-      Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-      Node* vfalse1;
-      {
-        Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                        input, minus_two_52);
-        Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                         check2, if_false1);
-
-        Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-        Node* vtrue2 = input;
-
-        Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-        Node* vfalse2;
-        {
-          Node* temp1 =
-              graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-          Node* temp2 = graph()->NewNode(
-              machine()->Float64Sub(),
-              graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-          vfalse2 = graph()->NewNode(
-              common()->Select(MachineRepresentation::kFloat64),
-              graph()->NewNode(machine()->Float64LessThan(), temp2, temp1),
-              graph()->NewNode(machine()->Float64Sub(), minus_one, temp2),
-              graph()->NewNode(machine()->Float64Sub(), minus_zero, temp2));
-        }
-
-        if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-        vfalse1 =
-            graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                             vtrue2, vfalse2, if_false1);
-      }
-
-      if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-      vfalse0 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue1, vfalse1, if_false0);
-    }
-
-    control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-    value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                             vtrue0, vfalse0, control);
-  }
-  return ValueEffectControl(value, effect, control);
-}
-
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundDown(Node* node, Node* effect,
-                                               Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundDown(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundDown().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
   Node* const input = node->InputAt(0);
-  return BuildFloat64RoundDown(input, effect, control);
+  return Just(BuildFloat64RoundDown(input));
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node, Node* effect,
-                                                   Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTiesEven(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundTiesEven().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const two = jsgraph()->Float64Constant(2.0);
-  Node* const half = jsgraph()->Float64Constant(0.5);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
   Node* const input = node->InputAt(0);
 
   // Generate case for round ties to even:
@@ -3561,79 +2664,38 @@
   //       value
   //     else
   //       value + 1.0
-  //
-  // Note: We do not use the Diamond helper class here, because it really hurts
-  // readability with nested diamonds.
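+  //
+  // Worked example: for input = 2.5, value = floor(input) = 2.0 and
+  // temp1 = 0.5, i.e. an exact tie, which is resolved via value mod 2:
+  // 2.0 mod 2 == 0 keeps value = 2.0, while input = 3.5 (value = 3.0,
+  // 3.0 mod 2 == 1) rounds up to 4.0.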
 
-  ValueEffectControl continuation =
-      BuildFloat64RoundDown(input, effect, control);
-  Node* value = continuation.value;
-  effect = continuation.effect;
-  control = continuation.control;
+  auto if_is_half = __ MakeLabel<1>();
+  auto done = __ MakeLabel<4>(MachineRepresentation::kFloat64);
 
-  Node* temp1 = graph()->NewNode(machine()->Float64Sub(), input, value);
+  Node* value = BuildFloat64RoundDown(input);
+  Node* temp1 = __ Float64Sub(input, value);
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), temp1, half);
-  Node* branch0 = graph()->NewNode(common()->Branch(), check0, control);
+  Node* const half = __ Float64Constant(0.5);
+  Node* check0 = __ Float64LessThan(temp1, half);
+  __ GotoIf(check0, &done, value);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0 = value;
+  Node* const one = __ Float64Constant(1.0);
+  Node* check1 = __ Float64LessThan(half, temp1);
+  __ GotoUnless(check1, &if_is_half);
+  __ Goto(&done, __ Float64Add(value, one));
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
-  {
-    Node* check1 = graph()->NewNode(machine()->Float64LessThan(), half, temp1);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_false0);
+  __ Bind(&if_is_half);
+  Node* temp2 = __ Float64Mod(value, __ Float64Constant(2.0));
+  Node* check2 = __ Float64Equal(temp2, __ Float64Constant(0.0));
+  __ GotoIf(check2, &done, value);
+  __ Goto(&done, __ Float64Add(value, one));
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = graph()->NewNode(machine()->Float64Add(), value, one);
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
-    {
-      Node* temp2 = graph()->NewNode(machine()->Float64Mod(), value, two);
-
-      Node* check2 = graph()->NewNode(machine()->Float64Equal(), temp2, zero);
-      Node* branch2 = graph()->NewNode(common()->Branch(), check2, if_false1);
-
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = value;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2 = graph()->NewNode(machine()->Float64Add(), value, one);
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
-    }
-
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
-  }
-
-  control = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  value = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue0, vfalse0, control);
-
-  return ValueEffectControl(value, effect, control);
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
 }
 
-EffectControlLinearizer::ValueEffectControl
-EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node, Node* effect,
-                                                   Node* control) {
+Maybe<Node*> EffectControlLinearizer::LowerFloat64RoundTruncate(Node* node) {
   // Nothing to be done if a fast hardware instruction is available.
   if (machine()->Float64RoundTruncate().IsSupported()) {
-    return ValueEffectControl(node, effect, control);
+    return Nothing<Node*>();
   }
 
-  Node* const one = jsgraph()->Float64Constant(1.0);
-  Node* const zero = jsgraph()->Float64Constant(0.0);
-  Node* const minus_zero = jsgraph()->Float64Constant(-0.0);
-  Node* const two_52 = jsgraph()->Float64Constant(4503599627370496.0E0);
-  Node* const minus_two_52 = jsgraph()->Float64Constant(-4503599627370496.0E0);
   Node* const input = node->InputAt(0);
 
   // General case for trunc.
@@ -3662,92 +2724,65 @@
   // Note: We do not use the Diamond helper class here, because it really hurts
   // readability with nested diamonds.
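+  //
+  // Worked example: for input = -2.6, temp1 = -0 - input = 2.6 and
+  // temp2 = (2^52 + 2.6) - 2^52 = 3.0, so temp1 < temp2 holds and
+  // temp3 = 3.0 - 1.0 = 2.0, giving -0 - 2.0 = -2.0 = trunc(-2.6).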
 
-  Node* check0 = graph()->NewNode(machine()->Float64LessThan(), zero, input);
-  Node* branch0 =
-      graph()->NewNode(common()->Branch(BranchHint::kTrue), check0, control);
+  auto if_not_positive = __ MakeDeferredLabel<1>();
+  auto if_greater_than_two_52 = __ MakeDeferredLabel<1>();
+  auto if_less_than_minus_two_52 = __ MakeDeferredLabel<1>();
+  auto if_zero = __ MakeDeferredLabel<1>();
+  auto done_temp3 = __ MakeLabel<2>(MachineRepresentation::kFloat64);
+  auto done = __ MakeLabel<6>(MachineRepresentation::kFloat64);
 
-  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
-  Node* vtrue0;
+  Node* const zero = __ Float64Constant(0.0);
+  Node* const two_52 = __ Float64Constant(4503599627370496.0E0);
+  Node* const one = __ Float64Constant(1.0);
+
+  Node* check0 = __ Float64LessThan(zero, input);
+  __ GotoUnless(check0, &if_not_positive);
   {
-    Node* check1 =
-        graph()->NewNode(machine()->Float64LessThanOrEqual(), two_52, input);
-    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
-
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
-
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
+    Node* check1 = __ Float64LessThanOrEqual(two_52, input);
+    __ GotoIf(check1, &if_greater_than_two_52);
     {
-      Node* temp1 = graph()->NewNode(
-          machine()->Float64Sub(),
-          graph()->NewNode(machine()->Float64Add(), two_52, input), two_52);
-      vfalse1 = graph()->NewNode(
-          common()->Select(MachineRepresentation::kFloat64),
-          graph()->NewNode(machine()->Float64LessThan(), input, temp1),
-          graph()->NewNode(machine()->Float64Sub(), temp1, one), temp1);
+      Node* temp1 = __ Float64Sub(__ Float64Add(two_52, input), two_52);
+      __ GotoUnless(__ Float64LessThan(input, temp1), &done, temp1);
+      __ Goto(&done, __ Float64Sub(temp1, one));
     }
 
-    if_true0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vtrue0 = graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                              vtrue1, vfalse1, if_true0);
+    __ Bind(&if_greater_than_two_52);
+    __ Goto(&done, input);
   }
 
-  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
-  Node* vfalse0;
+  __ Bind(&if_not_positive);
   {
-    Node* check1 = graph()->NewNode(machine()->Float64Equal(), input, zero);
-    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                     check1, if_false0);
+    Node* check1 = __ Float64Equal(input, zero);
+    __ GotoIf(check1, &if_zero);
 
-    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
-    Node* vtrue1 = input;
+    Node* const minus_two_52 = __ Float64Constant(-4503599627370496.0E0);
+    Node* check2 = __ Float64LessThanOrEqual(input, minus_two_52);
+    __ GotoIf(check2, &if_less_than_minus_two_52);
 
-    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
-    Node* vfalse1;
     {
-      Node* check2 = graph()->NewNode(machine()->Float64LessThanOrEqual(),
-                                      input, minus_two_52);
-      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
-                                       check2, if_false1);
+      Node* const minus_zero = __ Float64Constant(-0.0);
+      Node* temp1 = __ Float64Sub(minus_zero, input);
+      Node* temp2 = __ Float64Sub(__ Float64Add(two_52, temp1), two_52);
+      Node* check3 = __ Float64LessThan(temp1, temp2);
+      __ GotoUnless(check3, &done_temp3, temp2);
+      __ Goto(&done_temp3, __ Float64Sub(temp2, one));
 
-      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
-      Node* vtrue2 = input;
-
-      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
-      Node* vfalse2;
-      {
-        Node* temp1 =
-            graph()->NewNode(machine()->Float64Sub(), minus_zero, input);
-        Node* temp2 = graph()->NewNode(
-            machine()->Float64Sub(),
-            graph()->NewNode(machine()->Float64Add(), two_52, temp1), two_52);
-        Node* temp3 = graph()->NewNode(
-            common()->Select(MachineRepresentation::kFloat64),
-            graph()->NewNode(machine()->Float64LessThan(), temp1, temp2),
-            graph()->NewNode(machine()->Float64Sub(), temp2, one), temp2);
-        vfalse2 = graph()->NewNode(machine()->Float64Sub(), minus_zero, temp3);
-      }
-
-      if_false1 = graph()->NewNode(common()->Merge(2), if_true2, if_false2);
-      vfalse1 =
-          graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                           vtrue2, vfalse2, if_false1);
+      __ Bind(&done_temp3);
+      Node* temp3 = done_temp3.PhiAt(0);
+      __ Goto(&done, __ Float64Sub(minus_zero, temp3));
     }
+    __ Bind(&if_less_than_minus_two_52);
+    __ Goto(&done, input);
 
-    if_false0 = graph()->NewNode(common()->Merge(2), if_true1, if_false1);
-    vfalse0 =
-        graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                         vtrue1, vfalse1, if_false0);
+    __ Bind(&if_zero);
+    __ Goto(&done, input);
   }
-
-  Node* merge0 = graph()->NewNode(common()->Merge(2), if_true0, if_false0);
-  Node* value =
-      graph()->NewNode(common()->Phi(MachineRepresentation::kFloat64, 2),
-                       vtrue0, vfalse0, merge0);
-  return ValueEffectControl(value, effect, merge0);
+  __ Bind(&done);
+  return Just(done.PhiAt(0));
 }
 
+#undef __
+
 Factory* EffectControlLinearizer::factory() const {
   return isolate()->factory();
 }
@@ -3756,18 +2791,6 @@
   return jsgraph()->isolate();
 }
 
-Operator const* EffectControlLinearizer::ToNumberOperator() {
-  if (!to_number_operator_.is_set()) {
-    Callable callable = CodeFactory::ToNumber(isolate());
-    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-        isolate(), graph()->zone(), callable.descriptor(), 0, flags,
-        Operator::kEliminatable);
-    to_number_operator_.set(common()->Call(desc));
-  }
-  return to_number_operator_.get();
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/effect-control-linearizer.h b/src/compiler/effect-control-linearizer.h
index 4ed03c6..016d602 100644
--- a/src/compiler/effect-control-linearizer.h
+++ b/src/compiler/effect-control-linearizer.h
@@ -6,6 +6,7 @@
 #define V8_COMPILER_EFFECT_CONTROL_LINEARIZER_H_
 
 #include "src/compiler/common-operator.h"
+#include "src/compiler/graph-assembler.h"
 #include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/globals.h"
@@ -38,174 +39,94 @@
   void ProcessNode(Node* node, Node** frame_state, Node** effect,
                    Node** control);
 
-  struct ValueEffectControl {
-    Node* value;
-    Node* effect;
-    Node* control;
-    ValueEffectControl(Node* value, Node* effect, Node* control)
-        : value(value), effect(effect), control(control) {}
-  };
-
   bool TryWireInStateEffect(Node* node, Node* frame_state, Node** effect,
                             Node** control);
-  ValueEffectControl LowerChangeBitToTagged(Node* node, Node* effect,
-                                            Node* control);
-  ValueEffectControl LowerChangeInt31ToTaggedSigned(Node* node, Node* effect,
-                                                    Node* control);
-  ValueEffectControl LowerChangeInt32ToTagged(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerChangeUint32ToTagged(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerChangeFloat64ToTagged(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerChangeFloat64ToTaggedPointer(Node* node, Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerChangeTaggedSignedToInt32(Node* node, Node* effect,
-                                                    Node* control);
-  ValueEffectControl LowerChangeTaggedToBit(Node* node, Node* effect,
-                                            Node* control);
-  ValueEffectControl LowerChangeTaggedToInt32(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerChangeTaggedToUint32(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerCheckBounds(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckMaps(Node* node, Node* frame_state, Node* effect,
-                                    Node* control);
-  ValueEffectControl LowerCheckNumber(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckString(Node* node, Node* frame_state,
-                                      Node* effect, Node* control);
-  ValueEffectControl LowerCheckIf(Node* node, Node* frame_state, Node* effect,
-                                  Node* control);
-  ValueEffectControl LowerCheckedInt32Add(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Sub(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Div(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Mod(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32Div(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32Mod(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32Mul(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerCheckedInt32ToTaggedSigned(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerCheckedUint32ToInt32(Node* node, Node* frame_state,
-                                               Node* effect, Node* control);
-  ValueEffectControl LowerCheckedUint32ToTaggedSigned(Node* node,
-                                                      Node* frame_state,
-                                                      Node* effect,
-                                                      Node* control);
-  ValueEffectControl LowerCheckedFloat64ToInt32(Node* node, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedSignedToInt32(Node* node,
-                                                     Node* frame_state,
-                                                     Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerCheckedTaggedToInt32(Node* node, Node* frame_state,
-                                               Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedToFloat64(Node* node, Node* frame_state,
-                                                 Node* effect, Node* control);
-  ValueEffectControl LowerCheckedTaggedToTaggedSigned(Node* node,
-                                                      Node* frame_state,
-                                                      Node* effect,
-                                                      Node* control);
-  ValueEffectControl LowerCheckedTaggedToTaggedPointer(Node* node,
-                                                       Node* frame_state,
-                                                       Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerChangeTaggedToFloat64(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerTruncateTaggedToBit(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerTruncateTaggedToFloat64(Node* node, Node* effect,
-                                                  Node* control);
-  ValueEffectControl LowerTruncateTaggedToWord32(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerCheckedTruncateTaggedToWord32(Node* node,
-                                                        Node* frame_state,
-                                                        Node* effect,
-                                                        Node* control);
-  ValueEffectControl LowerObjectIsCallable(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerObjectIsNumber(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerObjectIsReceiver(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerObjectIsSmi(Node* node, Node* effect, Node* control);
-  ValueEffectControl LowerObjectIsString(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerObjectIsUndetectable(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerArrayBufferWasNeutered(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerStringCharCodeAt(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStringFromCharCode(Node* node, Node* effect,
-                                             Node* control);
-  ValueEffectControl LowerStringFromCodePoint(Node* node, Node* effect,
-                                              Node* control);
-  ValueEffectControl LowerStringEqual(Node* node, Node* effect, Node* control);
-  ValueEffectControl LowerStringLessThan(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerStringLessThanOrEqual(Node* node, Node* effect,
-                                                Node* control);
-  ValueEffectControl LowerCheckFloat64Hole(Node* node, Node* frame_state,
-                                           Node* effect, Node* control);
-  ValueEffectControl LowerCheckTaggedHole(Node* node, Node* frame_state,
-                                          Node* effect, Node* control);
-  ValueEffectControl LowerConvertTaggedHoleToUndefined(Node* node, Node* effect,
-                                                       Node* control);
-  ValueEffectControl LowerPlainPrimitiveToNumber(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerPlainPrimitiveToWord32(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerPlainPrimitiveToFloat64(Node* node, Node* effect,
-                                                  Node* control);
-  ValueEffectControl LowerEnsureWritableFastElements(Node* node, Node* effect,
-                                                     Node* control);
-  ValueEffectControl LowerMaybeGrowFastElements(Node* node, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl LowerTransitionElementsKind(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl LowerLoadTypedElement(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStoreTypedElement(Node* node, Node* effect,
-                                            Node* control);
+  Node* LowerChangeBitToTagged(Node* node);
+  Node* LowerChangeInt31ToTaggedSigned(Node* node);
+  Node* LowerChangeInt32ToTagged(Node* node);
+  Node* LowerChangeUint32ToTagged(Node* node);
+  Node* LowerChangeFloat64ToTagged(Node* node);
+  Node* LowerChangeFloat64ToTaggedPointer(Node* node);
+  Node* LowerChangeTaggedSignedToInt32(Node* node);
+  Node* LowerChangeTaggedToBit(Node* node);
+  Node* LowerChangeTaggedToInt32(Node* node);
+  Node* LowerChangeTaggedToUint32(Node* node);
+  Node* LowerChangeTaggedToTaggedSigned(Node* node);
+  Node* LowerCheckBounds(Node* node, Node* frame_state);
+  Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
+  Node* LowerCheckMaps(Node* node, Node* frame_state);
+  Node* LowerCheckNumber(Node* node, Node* frame_state);
+  Node* LowerCheckReceiver(Node* node, Node* frame_state);
+  Node* LowerCheckString(Node* node, Node* frame_state);
+  Node* LowerCheckIf(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Add(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Sub(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Div(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Mod(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32Div(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32Mod(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32Mul(Node* node, Node* frame_state);
+  Node* LowerCheckedInt32ToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32ToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedUint32ToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedFloat64ToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedSignedToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToInt32(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToFloat64(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToTaggedSigned(Node* node, Node* frame_state);
+  Node* LowerCheckedTaggedToTaggedPointer(Node* node, Node* frame_state);
+  Node* LowerChangeTaggedToFloat64(Node* node);
+  Node* LowerTruncateTaggedToBit(Node* node);
+  Node* LowerTruncateTaggedToFloat64(Node* node);
+  Node* LowerTruncateTaggedToWord32(Node* node);
+  Node* LowerCheckedTruncateTaggedToWord32(Node* node, Node* frame_state);
+  Node* LowerObjectIsDetectableCallable(Node* node);
+  Node* LowerObjectIsNonCallable(Node* node);
+  Node* LowerObjectIsNumber(Node* node);
+  Node* LowerObjectIsReceiver(Node* node);
+  Node* LowerObjectIsSmi(Node* node);
+  Node* LowerObjectIsString(Node* node);
+  Node* LowerObjectIsUndetectable(Node* node);
+  Node* LowerNewRestParameterElements(Node* node);
+  Node* LowerNewUnmappedArgumentsElements(Node* node);
+  Node* LowerArrayBufferWasNeutered(Node* node);
+  Node* LowerStringCharAt(Node* node);
+  Node* LowerStringCharCodeAt(Node* node);
+  Node* LowerStringFromCharCode(Node* node);
+  Node* LowerStringFromCodePoint(Node* node);
+  Node* LowerStringIndexOf(Node* node);
+  Node* LowerStringEqual(Node* node);
+  Node* LowerStringLessThan(Node* node);
+  Node* LowerStringLessThanOrEqual(Node* node);
+  Node* LowerCheckFloat64Hole(Node* node, Node* frame_state);
+  Node* LowerCheckTaggedHole(Node* node, Node* frame_state);
+  Node* LowerConvertTaggedHoleToUndefined(Node* node);
+  Node* LowerPlainPrimitiveToNumber(Node* node);
+  Node* LowerPlainPrimitiveToWord32(Node* node);
+  Node* LowerPlainPrimitiveToFloat64(Node* node);
+  Node* LowerEnsureWritableFastElements(Node* node);
+  Node* LowerMaybeGrowFastElements(Node* node, Node* frame_state);
+  void LowerTransitionElementsKind(Node* node);
+  Node* LowerLoadTypedElement(Node* node);
+  void LowerStoreTypedElement(Node* node);
 
   // Lowering of optional operators.
-  ValueEffectControl LowerFloat64RoundUp(Node* node, Node* effect,
-                                         Node* control);
-  ValueEffectControl LowerFloat64RoundDown(Node* node, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerFloat64RoundTiesEven(Node* node, Node* effect,
-                                               Node* control);
-  ValueEffectControl LowerFloat64RoundTruncate(Node* node, Node* effect,
-                                               Node* control);
+  Maybe<Node*> LowerFloat64RoundUp(Node* node);
+  Maybe<Node*> LowerFloat64RoundDown(Node* node);
+  Maybe<Node*> LowerFloat64RoundTiesEven(Node* node);
+  Maybe<Node*> LowerFloat64RoundTruncate(Node* node);
 
-  ValueEffectControl AllocateHeapNumberWithValue(Node* node, Node* effect,
-                                                 Node* control);
-  ValueEffectControl BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode,
-                                                Node* value, Node* frame_state,
-                                                Node* effect, Node* control);
-  ValueEffectControl BuildCheckedHeapNumberOrOddballToFloat64(
-      CheckTaggedInputMode mode, Node* value, Node* frame_state, Node* effect,
-      Node* control);
-  ValueEffectControl BuildFloat64RoundDown(Node* value, Node* effect,
-                                           Node* control);
-  ValueEffectControl LowerStringComparison(Callable const& callable, Node* node,
-                                           Node* effect, Node* control);
+  Node* AllocateHeapNumberWithValue(Node* node);
+  Node* BuildCheckedFloat64ToInt32(CheckForMinusZeroMode mode, Node* value,
+                                   Node* frame_state);
+  Node* BuildCheckedHeapNumberOrOddballToFloat64(CheckTaggedInputMode mode,
+                                                 Node* value,
+                                                 Node* frame_state);
+  Node* BuildFloat64RoundDown(Node* value);
+  Node* LowerStringComparison(Callable const& callable, Node* node);
 
   Node* ChangeInt32ToSmi(Node* value);
   Node* ChangeUint32ToSmi(Node* value);
-  Node* ChangeInt32ToFloat64(Node* value);
-  Node* ChangeUint32ToFloat64(Node* value);
   Node* ChangeSmiToInt32(Node* value);
   Node* ObjectIsSmi(Node* value);
 
@@ -222,15 +143,14 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
 
-  Operator const* ToNumberOperator();
+  GraphAssembler* gasm() { return &graph_assembler_; }
 
   JSGraph* js_graph_;
   Schedule* schedule_;
   Zone* temp_zone_;
   RegionObservability region_observability_ = RegionObservability::kObservable;
   SourcePositionTable* source_positions_;
-
-  SetOncePointer<Operator const> to_number_operator_;
+  GraphAssembler graph_assembler_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/escape-analysis-reducer.cc b/src/compiler/escape-analysis-reducer.cc
index f7708f8..c05092e 100644
--- a/src/compiler/escape-analysis-reducer.cc
+++ b/src/compiler/escape-analysis-reducer.cc
@@ -31,7 +31,7 @@
       fully_reduced_(static_cast<int>(jsgraph->graph()->NodeCount() * 2), zone),
       exists_virtual_allocate_(escape_analysis->ExistsVirtualAllocate()) {}
 
-Reduction EscapeAnalysisReducer::Reduce(Node* node) {
+Reduction EscapeAnalysisReducer::ReduceNode(Node* node) {
   if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
       fully_reduced_.Contains(node->id())) {
     return NoChange();
@@ -61,8 +61,7 @@
         break;
       }
       bool depends_on_object_state = false;
-      for (int i = 0; i < node->InputCount(); i++) {
-        Node* input = node->InputAt(i);
+      for (Node* input : node->inputs()) {
         switch (input->opcode()) {
           case IrOpcode::kAllocate:
           case IrOpcode::kFinishRegion:
@@ -97,9 +96,18 @@
   return NoChange();
 }
 
+Reduction EscapeAnalysisReducer::Reduce(Node* node) {
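+  // Reduce the node and record any changed replacement with the escape
+  // analysis, so that later queries can resolve through it.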
+  Reduction reduction = ReduceNode(node);
+  if (reduction.Changed() && node != reduction.replacement()) {
+    escape_analysis()->SetReplacement(node, reduction.replacement());
+  }
+  return reduction;
+}
+
 namespace {
 
-Node* MaybeGuard(JSGraph* jsgraph, Node* original, Node* replacement) {
+Node* MaybeGuard(JSGraph* jsgraph, Zone* zone, Node* original,
+                 Node* replacement) {
   // We might need to guard the replacement if the type of the {replacement}
   // node is not in a sub-type relation to the type of the {original} node.
   Type* const replacement_type = NodeProperties::GetType(replacement);
@@ -108,10 +116,18 @@
     Node* const control = NodeProperties::GetControlInput(original);
     replacement = jsgraph->graph()->NewNode(
         jsgraph->common()->TypeGuard(original_type), replacement, control);
+    NodeProperties::SetType(replacement, original_type);
   }
   return replacement;
 }
 
+Node* SkipTypeGuards(Node* node) {
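+  // Walk through chains of TypeGuard nodes (as introduced by MaybeGuard
+  // above) to the underlying value.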
+  while (node->opcode() == IrOpcode::kTypeGuard) {
+    node = NodeProperties::GetValueInput(node, 0);
+  }
+  return node;
+}
+
 }  // namespace
 
 Reduction EscapeAnalysisReducer::ReduceLoad(Node* node) {
@@ -120,12 +136,12 @@
   if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
     fully_reduced_.Add(node->id());
   }
-  if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+  if (escape_analysis()->IsVirtual(
+          SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
     if (Node* rep = escape_analysis()->GetReplacement(node)) {
-      isolate()->counters()->turbo_escape_loads_replaced()->Increment();
       TRACE("Replaced #%d (%s) with #%d (%s)\n", node->id(),
             node->op()->mnemonic(), rep->id(), rep->op()->mnemonic());
-      rep = MaybeGuard(jsgraph(), node, rep);
+      rep = MaybeGuard(jsgraph(), zone(), node, rep);
       ReplaceWithValue(node, rep);
       return Replace(rep);
     }
@@ -140,7 +156,8 @@
   if (node->id() < static_cast<NodeId>(fully_reduced_.length())) {
     fully_reduced_.Add(node->id());
   }
-  if (escape_analysis()->IsVirtual(NodeProperties::GetValueInput(node, 0))) {
+  if (escape_analysis()->IsVirtual(
+          SkipTypeGuards(NodeProperties::GetValueInput(node, 0)))) {
     TRACE("Removed #%d (%s) from effect chain\n", node->id(),
           node->op()->mnemonic());
     RelaxEffectsAndControls(node);
@@ -157,7 +174,6 @@
   }
   if (escape_analysis()->IsVirtual(node)) {
     RelaxEffectsAndControls(node);
-    isolate()->counters()->turbo_escape_allocs_replaced()->Increment();
     TRACE("Removed allocate #%d from effect chain\n", node->id());
     return Changed(node);
   }
@@ -195,14 +211,14 @@
 
 Reduction EscapeAnalysisReducer::ReduceReferenceEqual(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kReferenceEqual);
-  Node* left = NodeProperties::GetValueInput(node, 0);
-  Node* right = NodeProperties::GetValueInput(node, 1);
+  Node* left = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
+  Node* right = SkipTypeGuards(NodeProperties::GetValueInput(node, 1));
   if (escape_analysis()->IsVirtual(left)) {
     if (escape_analysis()->IsVirtual(right) &&
         escape_analysis()->CompareVirtualObjects(left, right)) {
       ReplaceWithValue(node, jsgraph()->TrueConstant());
       TRACE("Replaced ref eq #%d with true\n", node->id());
-      Replace(jsgraph()->TrueConstant());
+      return Replace(jsgraph()->TrueConstant());
     }
     // Right-hand side is not a virtual object, or a different one.
     ReplaceWithValue(node, jsgraph()->FalseConstant());
@@ -220,7 +236,7 @@
 
 Reduction EscapeAnalysisReducer::ReduceObjectIsSmi(Node* node) {
   DCHECK_EQ(node->opcode(), IrOpcode::kObjectIsSmi);
-  Node* input = NodeProperties::GetValueInput(node, 0);
+  Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, 0));
   if (escape_analysis()->IsVirtual(input)) {
     ReplaceWithValue(node, jsgraph()->FalseConstant());
     TRACE("Replaced ObjectIsSmi #%d with false\n", node->id());
@@ -313,7 +329,7 @@
                                                    bool node_multiused,
                                                    bool already_cloned,
                                                    bool multiple_users) {
-  Node* input = NodeProperties::GetValueInput(node, node_index);
+  Node* input = SkipTypeGuards(NodeProperties::GetValueInput(node, node_index));
   if (node->id() < static_cast<NodeId>(fully_reduced_.length()) &&
       fully_reduced_.Contains(node->id())) {
     return nullptr;
@@ -364,8 +380,6 @@
 #endif  // DEBUG
 }
 
-Isolate* EscapeAnalysisReducer::isolate() const { return jsgraph_->isolate(); }
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/escape-analysis-reducer.h b/src/compiler/escape-analysis-reducer.h
index 61e7607..01c2ae1 100644
--- a/src/compiler/escape-analysis-reducer.h
+++ b/src/compiler/escape-analysis-reducer.h
@@ -33,6 +33,7 @@
   bool compilation_failed() const { return compilation_failed_; }
 
  private:
+  Reduction ReduceNode(Node* node);
   Reduction ReduceLoad(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceAllocate(Node* node);
@@ -48,7 +49,6 @@
   JSGraph* jsgraph() const { return jsgraph_; }
   EscapeAnalysis* escape_analysis() const { return escape_analysis_; }
   Zone* zone() const { return zone_; }
-  Isolate* isolate() const;
 
   JSGraph* const jsgraph_;
   EscapeAnalysis* escape_analysis_;
diff --git a/src/compiler/escape-analysis.cc b/src/compiler/escape-analysis.cc
index 0218045..255e74e 100644
--- a/src/compiler/escape-analysis.cc
+++ b/src/compiler/escape-analysis.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node.h"
@@ -201,7 +202,7 @@
   }
   bool UpdateFrom(const VirtualObject& other);
   bool MergeFrom(MergeCache* cache, Node* at, Graph* graph,
-                 CommonOperatorBuilder* common);
+                 CommonOperatorBuilder* common, bool initialMerge);
   void SetObjectState(Node* node) { object_state_ = node; }
   Node* GetObjectState() const { return object_state_; }
   bool IsCopyRequired() const { return status_ & kCopyRequired; }
@@ -252,10 +253,14 @@
 class VirtualState : public ZoneObject {
  public:
   VirtualState(Node* owner, Zone* zone, size_t size)
-      : info_(size, nullptr, zone), owner_(owner) {}
+      : info_(size, nullptr, zone),
+        initialized_(static_cast<int>(size), zone),
+        owner_(owner) {}
 
   VirtualState(Node* owner, const VirtualState& state)
       : info_(state.info_.size(), nullptr, state.info_.get_allocator().zone()),
+        initialized_(state.initialized_.length(),
+                     state.info_.get_allocator().zone()),
         owner_(owner) {
     for (size_t i = 0; i < info_.size(); ++i) {
       if (state.info_[i]) {
@@ -280,6 +285,7 @@
 
  private:
   ZoneVector<VirtualObject*> info_;
+  BitVector initialized_;
   Node* owner_;
 
   DISALLOW_COPY_AND_ASSIGN(VirtualState);
@@ -375,6 +381,7 @@
 
 void VirtualState::SetVirtualObject(Alias alias, VirtualObject* obj) {
   info_[alias] = obj;
+  if (obj) initialized_.Add(alias);
 }
 
 bool VirtualState::UpdateFrom(VirtualState* from, Zone* zone) {
@@ -431,7 +438,6 @@
   }
   return true;
 }
-
 }  // namespace
 
 bool VirtualObject::MergeFields(size_t i, Node* at, MergeCache* cache,
@@ -440,12 +446,21 @@
   int value_input_count = static_cast<int>(cache->fields().size());
   Node* rep = GetField(i);
   if (!rep || !IsCreatedPhi(i)) {
+    Type* phi_type = Type::None();
+    for (Node* input : cache->fields()) {
+      CHECK_NOT_NULL(input);
+      CHECK(!input->IsDead());
+      Type* input_type = NodeProperties::GetType(input);
+      phi_type = Type::Union(phi_type, input_type, graph->zone());
+    }
     Node* control = NodeProperties::GetControlInput(at);
     cache->fields().push_back(control);
     Node* phi = graph->NewNode(
         common->Phi(MachineRepresentation::kTagged, value_input_count),
         value_input_count + 1, &cache->fields().front());
+    NodeProperties::SetType(phi, phi_type);
     SetField(i, phi, true);
+
 #ifdef DEBUG
     if (FLAG_trace_turbo_escape) {
       PrintF("    Creating Phi #%d as merge of", phi->id());
@@ -471,12 +486,15 @@
 }
 
 bool VirtualObject::MergeFrom(MergeCache* cache, Node* at, Graph* graph,
-                              CommonOperatorBuilder* common) {
+                              CommonOperatorBuilder* common,
+                              bool initialMerge) {
   DCHECK(at->opcode() == IrOpcode::kEffectPhi ||
          at->opcode() == IrOpcode::kPhi);
   bool changed = false;
   for (size_t i = 0; i < field_count(); ++i) {
-    if (Node* field = cache->GetFields(i)) {
+    if (!initialMerge && GetField(i) == nullptr) continue;
+    Node* field = cache->GetFields(i);
+    if (field && !IsCreatedPhi(i)) {
       changed = changed || GetField(i) != field;
       SetField(i, field);
       TRACE("    Field %zu agree on rep #%d\n", i, field->id());
@@ -516,8 +534,11 @@
         fields = std::min(obj->field_count(), fields);
       }
     }
-    if (cache->objects().size() == cache->states().size()) {
+    if (cache->objects().size() == cache->states().size() &&
+        (mergeObject || !initialized_.Contains(alias))) {
+      bool initialMerge = false;
       if (!mergeObject) {
+        initialMerge = true;
         VirtualObject* obj = new (zone)
             VirtualObject(cache->objects().front()->id(), this, zone, fields,
                           cache->objects().front()->IsInitialized());
@@ -542,7 +563,9 @@
         PrintF("\n");
       }
 #endif  // DEBUG
-      changed = mergeObject->MergeFrom(cache, at, graph, common) || changed;
+      changed =
+          mergeObject->MergeFrom(cache, at, graph, common, initialMerge) ||
+          changed;
     } else {
       if (mergeObject) {
         TRACE("  Alias %d, virtual object removed\n", alias);
@@ -671,6 +694,15 @@
           RevisitInputs(rep);
           RevisitUses(rep);
         }
+      } else {
+        Node* from = NodeProperties::GetValueInput(node, 0);
+        from = object_analysis_->ResolveReplacement(from);
+        if (SetEscaped(from)) {
+          TRACE("Setting #%d (%s) to escaped because of unresolved load #%i\n",
+                from->id(), from->op()->mnemonic(), node->id());
+          RevisitInputs(from);
+          RevisitUses(from);
+        }
       }
       RevisitUses(node);
       break;
@@ -795,6 +827,7 @@
       case IrOpcode::kSelect:
       // TODO(mstarzinger): The following list of operators will eventually be
       // handled by the EscapeAnalysisReducer (similar to ObjectIsSmi).
+      case IrOpcode::kConvertTaggedHoleToUndefined:
       case IrOpcode::kStringEqual:
       case IrOpcode::kStringLessThan:
       case IrOpcode::kStringLessThanOrEqual:
@@ -802,8 +835,11 @@
       case IrOpcode::kPlainPrimitiveToNumber:
       case IrOpcode::kPlainPrimitiveToWord32:
       case IrOpcode::kPlainPrimitiveToFloat64:
+      case IrOpcode::kStringCharAt:
       case IrOpcode::kStringCharCodeAt:
-      case IrOpcode::kObjectIsCallable:
+      case IrOpcode::kStringIndexOf:
+      case IrOpcode::kObjectIsDetectableCallable:
+      case IrOpcode::kObjectIsNonCallable:
       case IrOpcode::kObjectIsNumber:
       case IrOpcode::kObjectIsReceiver:
       case IrOpcode::kObjectIsString:
@@ -819,9 +855,9 @@
         if (use->op()->EffectInputCount() == 0 &&
             uses->op()->EffectInputCount() > 0 &&
             !IrOpcode::IsJsOpcode(use->opcode())) {
-          TRACE("Encountered unaccounted use by #%d (%s)\n", use->id(),
-                use->op()->mnemonic());
-          UNREACHABLE();
+          V8_Fatal(__FILE__, __LINE__,
+                   "Encountered unaccounted use by #%d (%s)\n", use->id(),
+                   use->op()->mnemonic());
         }
         if (SetEscaped(rep)) {
           TRACE("Setting #%d (%s) to escaped because of use by #%d (%s)\n",
@@ -842,6 +878,7 @@
   }
   if (CheckUsesForEscape(node, true)) {
     RevisitInputs(node);
+    RevisitUses(node);
   }
 }
 
@@ -863,11 +900,15 @@
       virtual_states_(zone),
       replacements_(zone),
       cycle_detection_(zone),
-      cache_(nullptr) {}
+      cache_(nullptr) {
+  // Type slot_not_analyzed_ manually.
+  double v = OpParameter<double>(slot_not_analyzed_);
+  NodeProperties::SetType(slot_not_analyzed_, Type::Range(v, v, zone));
+}
 
 EscapeAnalysis::~EscapeAnalysis() {}
 
-void EscapeAnalysis::Run() {
+bool EscapeAnalysis::Run() {
   replacements_.resize(graph()->NodeCount());
   status_analysis_->AssignAliases();
   if (status_analysis_->AliasCount() > 0) {
@@ -876,6 +917,9 @@
     status_analysis_->ResizeStatusVector();
     RunObjectAnalysis();
     status_analysis_->RunStatusAnalysis();
+    return true;
+  } else {
+    return false;
   }
 }
 
@@ -966,6 +1010,7 @@
           // VirtualObjects, and we want to delay phis to improve performance.
           if (use->opcode() == IrOpcode::kEffectPhi) {
             if (!status_analysis_->IsInQueue(use->id())) {
+              status_analysis_->SetInQueue(use->id(), true);
               queue.push_front(use);
             }
           } else if ((use->opcode() != IrOpcode::kLoadField &&
@@ -1044,6 +1089,19 @@
   return false;
 }
 
+namespace {
+
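+// Calls and tail calls carry a frame state input only if their call
+// descriptor demands one; all other operators are decided by the static
+// operator property.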
+bool HasFrameStateInput(const Operator* op) {
+  if (op->opcode() == IrOpcode::kCall || op->opcode() == IrOpcode::kTailCall) {
+    const CallDescriptor* d = CallDescriptorOf(op);
+    return d->NeedsFrameState();
+  } else {
+    return OperatorProperties::HasFrameStateInput(op);
+  }
+}
+
+}  // namespace
+
 bool EscapeAnalysis::Process(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kAllocate:
@@ -1080,6 +1138,9 @@
       ProcessAllocationUsers(node);
       break;
   }
+  if (HasFrameStateInput(node->op())) {
+    virtual_states_[node->id()]->SetCopyRequired();
+  }
   return true;
 }
 
@@ -1173,8 +1234,7 @@
           static_cast<void*>(virtual_states_[effect->id()]),
           effect->op()->mnemonic(), effect->id(), node->op()->mnemonic(),
           node->id());
-    if (status_analysis_->IsEffectBranchPoint(effect) ||
-        OperatorProperties::HasFrameStateInput(node->op())) {
+    if (status_analysis_->IsEffectBranchPoint(effect)) {
       virtual_states_[node->id()]->SetCopyRequired();
       TRACE(", effect input %s#%d is branch point", effect->op()->mnemonic(),
             effect->id());
@@ -1393,10 +1453,16 @@
       Node* rep = replacement(load);
       if (!rep || !IsEquivalentPhi(rep, cache_->fields())) {
         int value_input_count = static_cast<int>(cache_->fields().size());
+        Type* phi_type = Type::None();
+        for (Node* input : cache_->fields()) {
+          Type* input_type = NodeProperties::GetType(input);
+          phi_type = Type::Union(phi_type, input_type, graph()->zone());
+        }
         cache_->fields().push_back(NodeProperties::GetControlInput(from));
         Node* phi = graph()->NewNode(
             common()->Phi(MachineRepresentation::kTagged, value_input_count),
             value_input_count + 1, &cache_->fields().front());
+        NodeProperties::SetType(phi, phi_type);
         status_analysis_->ResizeStatusVector();
         SetReplacement(load, phi);
         TRACE(" got phi created.\n");
@@ -1583,13 +1649,14 @@
         cache_->fields().clear();
         for (size_t i = 0; i < vobj->field_count(); ++i) {
           if (Node* field = vobj->GetField(i)) {
-            cache_->fields().push_back(field);
+            cache_->fields().push_back(ResolveReplacement(field));
           }
         }
         int input_count = static_cast<int>(cache_->fields().size());
         Node* new_object_state =
             graph()->NewNode(common()->ObjectState(input_count), input_count,
                              &cache_->fields().front());
+        NodeProperties::SetType(new_object_state, Type::OtherInternal());
         vobj->SetObjectState(new_object_state);
         TRACE(
             "Creating object state #%d for vobj %p (from node #%d) at effect "
diff --git a/src/compiler/escape-analysis.h b/src/compiler/escape-analysis.h
index b85efe7..52edc4b 100644
--- a/src/compiler/escape-analysis.h
+++ b/src/compiler/escape-analysis.h
@@ -26,15 +26,17 @@
   EscapeAnalysis(Graph* graph, CommonOperatorBuilder* common, Zone* zone);
   ~EscapeAnalysis();
 
-  void Run();
+  bool Run();
 
   Node* GetReplacement(Node* node);
+  Node* ResolveReplacement(Node* node);
   bool IsVirtual(Node* node);
   bool IsEscaped(Node* node);
   bool CompareVirtualObjects(Node* left, Node* right);
   Node* GetOrCreateObjectState(Node* effect, Node* node);
   bool IsCyclicObjectState(Node* effect, Node* node);
   bool ExistsVirtualAllocate();
+  bool SetReplacement(Node* node, Node* rep);
 
  private:
   void RunObjectAnalysis();
@@ -58,8 +60,6 @@
                                        Node* node);
 
   Node* replacement(Node* node);
-  Node* ResolveReplacement(Node* node);
-  bool SetReplacement(Node* node, Node* rep);
   bool UpdateReplacement(VirtualState* state, Node* node, Node* rep);
 
   VirtualObject* GetVirtualObject(VirtualState* state, Node* node);
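Note: a hypothetical caller sketch for the new bool-returning Run() above (the
actual pipeline wiring is outside this patch). Run() reports whether any
aliases were discovered, so a caller can skip the replacement phase entirely
when there is nothing to rewrite:

  // Sketch only; {graph}, {common} and {zone} come from the enclosing phase.
  EscapeAnalysis escape_analysis(graph, common, zone);
  if (escape_analysis.Run()) {
    // Aliases exist: consult GetReplacement()/IsVirtual() while reducing.
  } else {
    // No candidate allocations were found; nothing to rewrite.
  }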
diff --git a/src/compiler/frame-elider.cc b/src/compiler/frame-elider.cc
index bb17d12..35d292b 100644
--- a/src/compiler/frame-elider.cc
+++ b/src/compiler/frame-elider.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/base/adapters.h"
 #include "src/compiler/frame-elider.h"
 
+#include "src/base/adapters.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -114,13 +115,36 @@
     }
   }
 
-  // Propagate towards start ("upwards") if there are successors and all of
-  // them need a frame.
-  for (RpoNumber& succ : block->successors()) {
-    if (!InstructionBlockAt(succ)->needs_frame()) return false;
+  // Propagate towards start ("upwards")
+  bool need_frame_successors = false;
+  if (block->SuccessorCount() == 1) {
+    // For single successors, propagate the needs_frame information.
+    need_frame_successors =
+        InstructionBlockAt(block->successors()[0])->needs_frame();
+  } else {
+    // For multiple successors, each successor must only have a single
+    // predecessor (because the graph is in edge-split form), so each successor
+    // can independently create/dismantle a frame if needed. Given this
+    // independent control, only propagate needs_frame if all non-deferred
+    // blocks need a frame.
+    for (RpoNumber& succ : block->successors()) {
+      InstructionBlock* successor_block = InstructionBlockAt(succ);
+      DCHECK_EQ(1, successor_block->PredecessorCount());
+      if (!successor_block->IsDeferred()) {
+        if (successor_block->needs_frame()) {
+          need_frame_successors = true;
+        } else {
+          return false;
+        }
+      }
+    }
   }
-  block->mark_needs_frame();
-  return true;
+  if (need_frame_successors) {
+    block->mark_needs_frame();
+    return true;
+  } else {
+    return false;
+  }
 }
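For illustration, a minimal standalone sketch of the propagation rule above,
with hypothetical stand-in types (the real pass operates on InstructionBlock
and RpoNumber): a block inherits needs_frame from a single successor directly,
and from multiple successors only when every non-deferred successor needs a
frame.

  #include <vector>

  // Hedged sketch, not the actual pass. Block is a stand-in type.
  struct Block {
    bool needs_frame = false;
    bool deferred = false;
    std::vector<Block*> successors;
  };

  // Returns true if {block} was newly marked as needing a frame.
  bool PropagateFromSuccessors(Block* block) {
    bool need_frame = false;
    if (block->successors.size() == 1) {
      need_frame = block->successors[0]->needs_frame;
    } else {
      for (Block* succ : block->successors) {
        if (succ->deferred) continue;  // deferred blocks manage their own frame
        if (!succ->needs_frame) return false;
        need_frame = true;
      }
    }
    if (!need_frame) return false;
    block->needs_frame = true;
    return true;
  }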
 
 
diff --git a/src/compiler/frame-states.cc b/src/compiler/frame-states.cc
index a02fb01..ec014da 100644
--- a/src/compiler/frame-states.cc
+++ b/src/compiler/frame-states.cc
@@ -6,6 +6,7 @@
 
 #include "src/base/functional.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
index 8d463df..a4d6829 100644
--- a/src/compiler/frame.h
+++ b/src/compiler/frame.h
@@ -113,9 +113,9 @@
 
   int AllocateSpillSlot(int width) {
     int frame_slot_count_before = frame_slot_count_;
-    int slot = AllocateAlignedFrameSlot(width);
-    spill_slot_count_ += (frame_slot_count_ - frame_slot_count_before);
-    return slot;
+    AllocateAlignedFrameSlots(width);
+    spill_slot_count_ += frame_slot_count_ - frame_slot_count_before;
+    return frame_slot_count_ - 1;
   }
 
   int AlignFrame(int alignment = kDoubleSize);
@@ -131,23 +131,15 @@
   static const int kJSFunctionSlot = 3 + StandardFrameConstants::kCPSlotCount;
 
  private:
-  int AllocateAlignedFrameSlot(int width) {
-    DCHECK(width == 4 || width == 8 || width == 16);
-    if (kPointerSize == 4) {
-      // Skip one slot if necessary.
-      if (width > kPointerSize) {
-        frame_slot_count_++;
-        frame_slot_count_ |= 1;
-        // 2 extra slots if width == 16.
-        frame_slot_count_ += (width & 16) / 8;
-      }
-    } else {
-      // No alignment when slots are 8 bytes.
-      DCHECK_EQ(8, kPointerSize);
-      // 1 extra slot if width == 16.
-      frame_slot_count_ += (width & 16) / 16;
-    }
-    return frame_slot_count_++;
+  void AllocateAlignedFrameSlots(int width) {
+    DCHECK_LT(0, width);
+    int new_frame_slots = (width + kPointerSize - 1) / kPointerSize;
+    // Align to 8 bytes if width is a multiple of 8 bytes, and to 16 bytes if
+    // multiple of 16.
+    int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : kPointerSize;
+    frame_slot_count_ =
+        RoundUp(frame_slot_count_ + new_frame_slots, align_to / kPointerSize);
+    DCHECK_LT(0, frame_slot_count_);
   }
 
  private:
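As a concrete reading of the arithmetic above, a standalone sketch with sample
values (kPointerSize assumed to be 4 here, as on ia32):

  // Hedged sketch of AllocateAlignedFrameSlots' rounding, in isolation.
  int AlignedSlotCount(int frame_slot_count, int width, int pointer_size = 4) {
    int new_frame_slots = (width + pointer_size - 1) / pointer_size;
    int align_to = (width & 15) == 0 ? 16 : (width & 7) == 0 ? 8 : pointer_size;
    int units = align_to / pointer_size;
    int total = frame_slot_count + new_frame_slots;
    return ((total + units - 1) / units) * units;  // RoundUp
  }

  // AlignedSlotCount(3, 8) == 6: two new 4-byte slots, rounded up to an even
  // slot count so the 8-byte value stays 8-byte aligned.
  // AlignedSlotCount(3, 16) == 8: four new slots, rounded to a multiple of 4.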
diff --git a/src/compiler/graph-assembler.cc b/src/compiler/graph-assembler.cc
new file mode 100644
index 0000000..dbeff87
--- /dev/null
+++ b/src/compiler/graph-assembler.cc
@@ -0,0 +1,295 @@
+// Copyright 2015 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/compiler/linkage.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphAssembler::GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control,
+                               Zone* zone)
+    : temp_zone_(zone),
+      jsgraph_(jsgraph),
+      current_effect_(effect),
+      current_control_(control) {}
+
+Node* GraphAssembler::IntPtrConstant(intptr_t value) {
+  return jsgraph()->IntPtrConstant(value);
+}
+
+Node* GraphAssembler::Int32Constant(int32_t value) {
+  return jsgraph()->Int32Constant(value);
+}
+
+Node* GraphAssembler::UniqueInt32Constant(int32_t value) {
+  return graph()->NewNode(common()->Int32Constant(value));
+}
+
+Node* GraphAssembler::SmiConstant(int32_t value) {
+  return jsgraph()->SmiConstant(value);
+}
+
+Node* GraphAssembler::Uint32Constant(int32_t value) {
+  return jsgraph()->Uint32Constant(value);
+}
+
+Node* GraphAssembler::Float64Constant(double value) {
+  return jsgraph()->Float64Constant(value);
+}
+
+Node* GraphAssembler::HeapConstant(Handle<HeapObject> object) {
+  return jsgraph()->HeapConstant(object);
+}
+
+
+Node* GraphAssembler::ExternalConstant(ExternalReference ref) {
+  return jsgraph()->ExternalConstant(ref);
+}
+
+Node* GraphAssembler::CEntryStubConstant(int result_size) {
+  return jsgraph()->CEntryStubConstant(result_size);
+}
+
+#define SINGLETON_CONST_DEF(Name) \
+  Node* GraphAssembler::Name() { return jsgraph()->Name(); }
+JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DEF)
+#undef SINGLETON_CONST_DEF
+
+#define PURE_UNOP_DEF(Name)                            \
+  Node* GraphAssembler::Name(Node* input) {            \
+    return graph()->NewNode(machine()->Name(), input); \
+  }
+PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DEF)
+#undef PURE_UNOP_DEF
+
+#define PURE_BINOP_DEF(Name)                                 \
+  Node* GraphAssembler::Name(Node* left, Node* right) {      \
+    return graph()->NewNode(machine()->Name(), left, right); \
+  }
+PURE_ASSEMBLER_MACH_BINOP_LIST(PURE_BINOP_DEF)
+#undef PURE_BINOP_DEF
+
+#define CHECKED_BINOP_DEF(Name)                                                \
+  Node* GraphAssembler::Name(Node* left, Node* right) {                        \
+    return graph()->NewNode(machine()->Name(), left, right, current_control_); \
+  }
+CHECKED_ASSEMBLER_MACH_BINOP_LIST(CHECKED_BINOP_DEF)
+#undef CHECKED_BINOP_DEF
+
+Node* GraphAssembler::Float64RoundDown(Node* value) {
+  if (machine()->Float64RoundDown().IsSupported()) {
+    return graph()->NewNode(machine()->Float64RoundDown().op(), value);
+  }
+  return nullptr;
+}
+
+Node* GraphAssembler::Projection(int index, Node* value) {
+  return graph()->NewNode(common()->Projection(index), value, current_control_);
+}
+
+Node* GraphAssembler::Allocate(PretenureFlag pretenure, Node* size) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->Allocate(NOT_TENURED), size,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadField(FieldAccess const& access, Node* object) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->LoadField(access), object,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::LoadElement(ElementAccess const& access, Node* object,
+                                  Node* index) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->LoadElement(access), object, index,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreField(FieldAccess const& access, Node* object,
+                                 Node* value) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->StoreField(access), object, value,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::StoreElement(ElementAccess const& access, Node* object,
+                                   Node* index, Node* value) {
+  return current_effect_ =
+             graph()->NewNode(simplified()->StoreElement(access), object, index,
+                              value, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Store(StoreRepresentation rep, Node* object, Node* offset,
+                            Node* value) {
+  return current_effect_ =
+             graph()->NewNode(machine()->Store(rep), object, offset, value,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Load(MachineType rep, Node* object, Node* offset) {
+  return current_effect_ =
+             graph()->NewNode(machine()->Load(rep), object, offset,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::Retain(Node* buffer) {
+  return current_effect_ =
+             graph()->NewNode(common()->Retain(), buffer, current_effect_);
+}
+
+Node* GraphAssembler::UnsafePointerAdd(Node* base, Node* external) {
+  return current_effect_ =
+             graph()->NewNode(machine()->UnsafePointerAdd(), base, external,
+                              current_effect_, current_control_);
+}
+
+Node* GraphAssembler::ToNumber(Node* value) {
+  return current_effect_ =
+             graph()->NewNode(ToNumberOperator(), ToNumberBuiltinConstant(),
+                              value, NoContextConstant(), current_effect_);
+}
+
+Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+                                   Node* frame_state) {
+  return current_control_ = current_effect_ = graph()->NewNode(
+             common()->DeoptimizeIf(DeoptimizeKind::kEager, reason), condition,
+             frame_state, current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeKind kind,
+                                       DeoptimizeReason reason, Node* condition,
+                                       Node* frame_state) {
+  return current_control_ = current_effect_ = graph()->NewNode(
+             common()->DeoptimizeUnless(kind, reason), condition, frame_state,
+             current_effect_, current_control_);
+}
+
+Node* GraphAssembler::DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+                                       Node* frame_state) {
+  return DeoptimizeUnless(DeoptimizeKind::kEager, reason, condition,
+                          frame_state);
+}
+
+void GraphAssembler::Branch(Node* condition,
+                            GraphAssemblerStaticLabel<1>* if_true,
+                            GraphAssemblerStaticLabel<1>* if_false) {
+  DCHECK_NOT_NULL(current_control_);
+
+  BranchHint hint = BranchHint::kNone;
+  if (if_true->IsDeferred() != if_false->IsDeferred()) {
+    hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
+  }
+
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+  MergeState(if_true);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+  MergeState(if_false);
+
+  current_control_ = nullptr;
+  current_effect_ = nullptr;
+}
+
+// Extractors (should only be used when destructing the assembler).
+Node* GraphAssembler::ExtractCurrentControl() {
+  Node* result = current_control_;
+  current_control_ = nullptr;
+  return result;
+}
+
+Node* GraphAssembler::ExtractCurrentEffect() {
+  Node* result = current_effect_;
+  current_effect_ = nullptr;
+  return result;
+}
+
+void GraphAssembler::Reset(Node* effect, Node* control) {
+  current_effect_ = effect;
+  current_control_ = control;
+}
+
+Operator const* GraphAssembler::ToNumberOperator() {
+  if (!to_number_operator_.is_set()) {
+    Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
+    CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+    CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+        jsgraph()->isolate(), graph()->zone(), callable.descriptor(), 0, flags,
+        Operator::kEliminatable);
+    to_number_operator_.set(common()->Call(desc));
+  }
+  return to_number_operator_.get();
+}
+
+Node* GraphAssemblerLabel::PhiAt(size_t index) {
+  DCHECK(IsBound());
+  return GetBindingsPtrFor(index)[0];
+}
+
+GraphAssemblerLabel::GraphAssemblerLabel(GraphAssemblerLabelType is_deferred,
+                                         size_t merge_count, size_t var_count,
+                                         MachineRepresentation* representations,
+                                         Zone* zone)
+    : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred),
+      max_merge_count_(merge_count),
+      var_count_(var_count) {
+  effects_ = zone->NewArray<Node*>(MaxMergeCount() + 1);
+  for (size_t i = 0; i < MaxMergeCount() + 1; i++) {
+    effects_[i] = nullptr;
+  }
+
+  controls_ = zone->NewArray<Node*>(MaxMergeCount());
+  for (size_t i = 0; i < MaxMergeCount(); i++) {
+    controls_[i] = nullptr;
+  }
+
+  size_t num_bindings = (MaxMergeCount() + 1) * PhiCount() + 1;
+  bindings_ = zone->NewArray<Node*>(num_bindings);
+  for (size_t i = 0; i < num_bindings; i++) {
+    bindings_[i] = nullptr;
+  }
+
+  representations_ = zone->NewArray<MachineRepresentation>(PhiCount() + 1);
+  for (size_t i = 0; i < PhiCount(); i++) {
+    representations_[i] = representations[i];
+  }
+}
+
+GraphAssemblerLabel::~GraphAssemblerLabel() {
+  DCHECK(IsBound() || MergedCount() == 0);
+}
+
+Node** GraphAssemblerLabel::GetBindingsPtrFor(size_t phi_index) {
+  DCHECK_LT(phi_index, PhiCount());
+  return &bindings_[phi_index * (MaxMergeCount() + 1)];
+}
+
+void GraphAssemblerLabel::SetBinding(size_t phi_index, size_t merge_index,
+                                     Node* binding) {
+  DCHECK_LT(phi_index, PhiCount());
+  DCHECK_LT(merge_index, MaxMergeCount());
+  bindings_[phi_index * (MaxMergeCount() + 1) + merge_index] = binding;
+}
+
+MachineRepresentation GraphAssemblerLabel::GetRepresentationFor(
+    size_t phi_index) {
+  DCHECK_LT(phi_index, PhiCount());
+  return representations_[phi_index];
+}
+
+Node** GraphAssemblerLabel::GetControlsPtr() { return controls_; }
+
+Node** GraphAssemblerLabel::GetEffectsPtr() { return effects_; }
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
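A hedged usage sketch of the new assembler ({jsgraph}, {effect}, {control},
{zone}, the FieldAccess {access} and the Node* {object} are assumed to come
from an enclosing lowering phase): each state-threading helper updates
current_effect_/current_control_ in place, so lowering code reads top to
bottom.

  GraphAssembler gasm(jsgraph, effect, control, zone);
  Node* value = gasm.LoadField(access, object);  // threads the effect chain
  Node* incremented = gasm.Int32Add(value, gasm.Int32Constant(1));
  gasm.StoreField(access, object, incremented);  // threads the effect again
  effect = gasm.ExtractCurrentEffect();          // hand the chains back
  control = gasm.ExtractCurrentControl();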
diff --git a/src/compiler/graph-assembler.h b/src/compiler/graph-assembler.h
new file mode 100644
index 0000000..057e781
--- /dev/null
+++ b/src/compiler/graph-assembler.h
@@ -0,0 +1,451 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_ASSEMBLER_H_
+#define V8_COMPILER_GRAPH_ASSEMBLER_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+
+class JSGraph;
+class Graph;
+
+namespace compiler {
+
+#define PURE_ASSEMBLER_MACH_UNOP_LIST(V) \
+  V(ChangeInt32ToInt64)                  \
+  V(ChangeInt32ToFloat64)                \
+  V(ChangeUint32ToFloat64)               \
+  V(ChangeUint32ToUint64)                \
+  V(ChangeFloat64ToInt32)                \
+  V(ChangeFloat64ToUint32)               \
+  V(TruncateInt64ToInt32)                \
+  V(RoundFloat64ToInt32)                 \
+  V(TruncateFloat64ToWord32)             \
+  V(Float64ExtractHighWord32)            \
+  V(Float64Abs)                          \
+  V(BitcastWordToTagged)
+
+#define PURE_ASSEMBLER_MACH_BINOP_LIST(V) \
+  V(WordShl)                              \
+  V(WordSar)                              \
+  V(WordAnd)                              \
+  V(Word32Or)                             \
+  V(Word32And)                            \
+  V(Word32Shr)                            \
+  V(Word32Shl)                            \
+  V(IntAdd)                               \
+  V(IntSub)                               \
+  V(UintLessThan)                         \
+  V(Int32Add)                             \
+  V(Int32Sub)                             \
+  V(Int32Mul)                             \
+  V(Int32LessThanOrEqual)                 \
+  V(Uint32LessThanOrEqual)                \
+  V(Uint32LessThan)                       \
+  V(Int32LessThan)                        \
+  V(Float64Add)                           \
+  V(Float64Sub)                           \
+  V(Float64Mod)                           \
+  V(Float64Equal)                         \
+  V(Float64LessThan)                      \
+  V(Float64LessThanOrEqual)               \
+  V(Word32Equal)                          \
+  V(WordEqual)
+
+#define CHECKED_ASSEMBLER_MACH_BINOP_LIST(V) \
+  V(Int32AddWithOverflow)                    \
+  V(Int32SubWithOverflow)                    \
+  V(Int32MulWithOverflow)                    \
+  V(Int32Mod)                                \
+  V(Int32Div)                                \
+  V(Uint32Mod)                               \
+  V(Uint32Div)
+
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+  V(TrueConstant)                          \
+  V(FalseConstant)                         \
+  V(HeapNumberMapConstant)                 \
+  V(NoContextConstant)                     \
+  V(EmptyStringConstant)                   \
+  V(UndefinedConstant)                     \
+  V(TheHoleConstant)                       \
+  V(FixedArrayMapConstant)                 \
+  V(ToNumberBuiltinConstant)               \
+  V(AllocateInNewSpaceStubConstant)        \
+  V(AllocateInOldSpaceStubConstant)
+
+class GraphAssembler;
+
+enum class GraphAssemblerLabelType { kDeferred, kNonDeferred };
+
+// Label with statically known count of incoming branches and phis.
+template <size_t MergeCount, size_t VarCount = 0u>
+class GraphAssemblerStaticLabel {
+ public:
+  Node* PhiAt(size_t index);
+
+  template <typename... Reps>
+  explicit GraphAssemblerStaticLabel(GraphAssemblerLabelType is_deferred,
+                                     Reps... reps)
+      : is_deferred_(is_deferred == GraphAssemblerLabelType::kDeferred) {
+    STATIC_ASSERT(VarCount == sizeof...(reps));
+    MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+                                          reps...};
+    for (size_t i = 0; i < VarCount; i++) {
+      representations_[i] = reps_array[i + 1];
+    }
+  }
+
+  ~GraphAssemblerStaticLabel() { DCHECK(IsBound() || MergedCount() == 0); }
+
+ private:
+  friend class GraphAssembler;
+
+  void SetBound() {
+    DCHECK(!IsBound());
+    DCHECK_EQ(merged_count_, MergeCount);
+    is_bound_ = true;
+  }
+  bool IsBound() const { return is_bound_; }
+
+  size_t PhiCount() const { return VarCount; }
+  size_t MaxMergeCount() const { return MergeCount; }
+  size_t MergedCount() const { return merged_count_; }
+  bool IsDeferred() const { return is_deferred_; }
+
+  // For each phi, the buffer must have at least MaxMergeCount() + 1
+  // node entries.
+  Node** GetBindingsPtrFor(size_t phi_index) {
+    DCHECK_LT(phi_index, PhiCount());
+    return &bindings_[phi_index * (MergeCount + 1)];
+  }
+  void SetBinding(size_t phi_index, size_t merge_index, Node* binding) {
+    DCHECK_LT(phi_index, PhiCount());
+    DCHECK_LT(merge_index, MergeCount);
+    bindings_[phi_index * (MergeCount + 1) + merge_index] = binding;
+  }
+  MachineRepresentation GetRepresentationFor(size_t phi_index) {
+    DCHECK_LT(phi_index, PhiCount());
+    return representations_[phi_index];
+  }
+  // The controls buffer must have at least MaxMergeCount() entries.
+  Node** GetControlsPtr() { return controls_; }
+  // The effects buffer must have at least MaxMergeCount() + 1 entries.
+  Node** GetEffectsPtr() { return effects_; }
+  void IncrementMergedCount() { merged_count_++; }
+
+  bool is_bound_ = false;
+  bool is_deferred_;
+  size_t merged_count_ = 0;
+  Node* effects_[MergeCount + 1];  // Extra element for control edge,
+                                   // so that we can use the array to
+                                   // construct EffectPhi.
+  Node* controls_[MergeCount];
+  Node* bindings_[(MergeCount + 1) * VarCount + 1];
+  MachineRepresentation representations_[VarCount + 1];
+};
+
+// General label (with zone allocated buffers for incoming branches and phi
+// inputs).
+class GraphAssemblerLabel {
+ public:
+  Node* PhiAt(size_t index);
+
+  GraphAssemblerLabel(GraphAssemblerLabelType is_deferred, size_t merge_count,
+                      size_t var_count, MachineRepresentation* representations,
+                      Zone* zone);
+
+  ~GraphAssemblerLabel();
+
+ private:
+  friend class GraphAssembler;
+
+  void SetBound() {
+    DCHECK(!is_bound_);
+    is_bound_ = true;
+  }
+  bool IsBound() const { return is_bound_; }
+  size_t PhiCount() const { return var_count_; }
+  size_t MaxMergeCount() const { return max_merge_count_; }
+  size_t MergedCount() const { return merged_count_; }
+  bool IsDeferred() const { return is_deferred_; }
+
+  // For each phi, the buffer must have at least MaxMergeCount() + 1
+  // node entries.
+  Node** GetBindingsPtrFor(size_t phi_index);
+  void SetBinding(size_t phi_index, size_t merge_index, Node* binding);
+  MachineRepresentation GetRepresentationFor(size_t phi_index);
+  // The controls buffer must have at least MaxMergeCount() entries.
+  Node** GetControlsPtr();
+  // The effects buffer must have at least MaxMergeCount() + 1 entries.
+  Node** GetEffectsPtr();
+  void IncrementMergedCount() { merged_count_++; }
+
+  bool is_bound_ = false;
+  bool is_deferred_;
+  size_t merged_count_ = 0;
+  size_t max_merge_count_;
+  size_t var_count_;
+  Node** effects_ = nullptr;
+  Node** controls_ = nullptr;
+  Node** bindings_ = nullptr;
+  MachineRepresentation* representations_ = nullptr;
+};
+
+class GraphAssembler {
+ public:
+  GraphAssembler(JSGraph* jsgraph, Node* effect, Node* control, Zone* zone);
+
+  void Reset(Node* effect, Node* control);
+
+  // Create non-deferred label with statically known number of incoming
+  // gotos/branches.
+  template <size_t MergeCount, typename... Reps>
+  static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)> MakeLabel(
+      Reps... reps) {
+    return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+        GraphAssemblerLabelType::kNonDeferred, reps...);
+  }
+
+  // Create deferred label with statically known number of incoming
+  // gotos/branches.
+  template <size_t MergeCount, typename... Reps>
+  static GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>
+  MakeDeferredLabel(Reps... reps) {
+    return GraphAssemblerStaticLabel<MergeCount, sizeof...(Reps)>(
+        GraphAssemblerLabelType::kDeferred, reps...);
+  }
+
+  // Create label with number of incoming branches supplied at runtime.
+  template <typename... Reps>
+  GraphAssemblerLabel MakeLabelFor(GraphAssemblerLabelType is_deferred,
+                                   size_t merge_count, Reps... reps) {
+    MachineRepresentation reps_array[] = {MachineRepresentation::kNone,
+                                          reps...};
+    return GraphAssemblerLabel(is_deferred, merge_count, sizeof...(reps),
+                               &(reps_array[1]), temp_zone());
+  }
+
+  // Value creation.
+  Node* IntPtrConstant(intptr_t value);
+  Node* Uint32Constant(int32_t value);
+  Node* Int32Constant(int32_t value);
+  Node* UniqueInt32Constant(int32_t value);
+  Node* SmiConstant(int32_t value);
+  Node* Float64Constant(double value);
+  Node* Projection(int index, Node* value);
+  Node* HeapConstant(Handle<HeapObject> object);
+  Node* CEntryStubConstant(int result_size);
+  Node* ExternalConstant(ExternalReference ref);
+
+#define SINGLETON_CONST_DECL(Name) Node* Name();
+  JSGRAPH_SINGLETON_CONSTANT_LIST(SINGLETON_CONST_DECL)
+#undef SINGLETON_CONST_DECL
+
+#define PURE_UNOP_DECL(Name) Node* Name(Node* input);
+  PURE_ASSEMBLER_MACH_UNOP_LIST(PURE_UNOP_DECL)
+#undef PURE_UNOP_DECL
+
+#define BINOP_DECL(Name) Node* Name(Node* left, Node* right);
+  PURE_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+  CHECKED_ASSEMBLER_MACH_BINOP_LIST(BINOP_DECL)
+#undef BINOP_DECL
+
+  Node* Float64RoundDown(Node* value);
+
+  Node* ToNumber(Node* value);
+  Node* Allocate(PretenureFlag pretenure, Node* size);
+  Node* LoadField(FieldAccess const&, Node* object);
+  Node* LoadElement(ElementAccess const&, Node* object, Node* index);
+  Node* StoreField(FieldAccess const&, Node* object, Node* value);
+  Node* StoreElement(ElementAccess const&, Node* object, Node* index,
+                     Node* value);
+
+  Node* Store(StoreRepresentation rep, Node* object, Node* offset, Node* value);
+  Node* Load(MachineType rep, Node* object, Node* offset);
+
+  Node* Retain(Node* buffer);
+  Node* UnsafePointerAdd(Node* base, Node* external);
+
+  Node* DeoptimizeIf(DeoptimizeReason reason, Node* condition,
+                     Node* frame_state);
+  Node* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+                         Node* condition, Node* frame_state);
+  Node* DeoptimizeUnless(DeoptimizeReason reason, Node* condition,
+                         Node* frame_state);
+  template <typename... Args>
+  Node* Call(const CallDescriptor* desc, Args... args);
+  template <typename... Args>
+  Node* Call(const Operator* op, Args... args);
+
+  // Basic control operations.
+  template <class LabelType>
+  void Bind(LabelType* label);
+
+  template <class LabelType, typename... vars>
+  void Goto(LabelType* label, vars...);
+
+  void Branch(Node* condition, GraphAssemblerStaticLabel<1>* if_true,
+              GraphAssemblerStaticLabel<1>* if_false);
+
+  // Control helpers.
+  // {GotoIf(c, l)} is equivalent to {Branch(c, l, templ);Bind(templ)}.
+  template <class LabelType, typename... vars>
+  void GotoIf(Node* condition, LabelType* label, vars...);
+
+  // {GotoUnless(c, l)} is equivalent to {Branch(c, templ, l);Bind(templ)}.
+  template <class LabelType, typename... vars>
+  void GotoUnless(Node* condition, LabelType* label, vars...);
+
+  // Extractors (should only be used when destructing/resetting the assembler).
+  Node* ExtractCurrentControl();
+  Node* ExtractCurrentEffect();
+
+ private:
+  template <class LabelType, typename... Vars>
+  void MergeState(LabelType label, Vars... vars);
+
+  Operator const* ToNumberOperator();
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph_->graph(); }
+  Zone* temp_zone() const { return temp_zone_; }
+  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+  SimplifiedOperatorBuilder* simplified() const {
+    return jsgraph()->simplified();
+  }
+
+  SetOncePointer<Operator const> to_number_operator_;
+  Zone* temp_zone_;
+  JSGraph* jsgraph_;
+  Node* current_effect_;
+  Node* current_control_;
+};
+
+template <size_t MergeCount, size_t VarCount>
+Node* GraphAssemblerStaticLabel<MergeCount, VarCount>::PhiAt(size_t index) {
+  DCHECK(IsBound());
+  return GetBindingsPtrFor(index)[0];
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::MergeState(LabelType label, Vars... vars) {
+  DCHECK(!label->IsBound());
+  size_t merged_count = label->MergedCount();
+  DCHECK_LT(merged_count, label->MaxMergeCount());
+  DCHECK_EQ(label->PhiCount(), sizeof...(vars));
+  label->GetEffectsPtr()[merged_count] = current_effect_;
+  label->GetControlsPtr()[merged_count] = current_control_;
+  // We need to start with nullptr to avoid 0-length arrays.
+  Node* var_array[] = {nullptr, vars...};
+  for (size_t i = 0; i < sizeof...(vars); i++) {
+    label->SetBinding(i, merged_count, var_array[i + 1]);
+  }
+  label->IncrementMergedCount();
+}
+
+template <class LabelType>
+void GraphAssembler::Bind(LabelType* label) {
+  DCHECK(current_control_ == nullptr);
+  DCHECK(current_effect_ == nullptr);
+  DCHECK(label->MaxMergeCount() > 0);
+  DCHECK_EQ(label->MaxMergeCount(), label->MergedCount());
+
+  int merge_count = static_cast<int>(label->MaxMergeCount());
+  if (merge_count == 1) {
+    current_control_ = label->GetControlsPtr()[0];
+    current_effect_ = label->GetEffectsPtr()[0];
+    label->SetBound();
+    return;
+  }
+
+  current_control_ = graph()->NewNode(common()->Merge(merge_count), merge_count,
+                                      label->GetControlsPtr());
+
+  Node** effects = label->GetEffectsPtr();
+  current_effect_ = effects[0];
+  for (size_t i = 1; i < label->MaxMergeCount(); i++) {
+    if (current_effect_ != effects[i]) {
+      effects[label->MaxMergeCount()] = current_control_;
+      current_effect_ = graph()->NewNode(common()->EffectPhi(merge_count),
+                                         merge_count + 1, effects);
+      break;
+    }
+  }
+
+  for (size_t var = 0; var < label->PhiCount(); var++) {
+    Node** bindings = label->GetBindingsPtrFor(var);
+    bindings[label->MaxMergeCount()] = current_control_;
+    bindings[0] = graph()->NewNode(
+        common()->Phi(label->GetRepresentationFor(var), merge_count),
+        merge_count + 1, bindings);
+  }
+
+  label->SetBound();
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::Goto(LabelType* label, Vars... vars) {
+  DCHECK_NOT_NULL(current_control_);
+  DCHECK_NOT_NULL(current_effect_);
+  MergeState(label, vars...);
+  current_control_ = nullptr;
+  current_effect_ = nullptr;
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoIf(Node* condition, LabelType* label, Vars... vars) {
+  BranchHint hint =
+      label->IsDeferred() ? BranchHint::kFalse : BranchHint::kNone;
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+  MergeState(label, vars...);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+}
+
+template <class LabelType, typename... Vars>
+void GraphAssembler::GotoUnless(Node* condition, LabelType* label,
+                                Vars... vars) {
+  BranchHint hint = label->IsDeferred() ? BranchHint::kTrue : BranchHint::kNone;
+  Node* branch =
+      graph()->NewNode(common()->Branch(hint), condition, current_control_);
+
+  current_control_ = graph()->NewNode(common()->IfFalse(), branch);
+  MergeState(label, vars...);
+
+  current_control_ = graph()->NewNode(common()->IfTrue(), branch);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const CallDescriptor* desc, Args... args) {
+  const Operator* op = common()->Call(desc);
+  return Call(op, args...);
+}
+
+template <typename... Args>
+Node* GraphAssembler::Call(const Operator* op, Args... args) {
+  DCHECK_EQ(IrOpcode::kCall, op->opcode());
+  Node* args_array[] = {args..., current_effect_, current_control_};
+  int size = static_cast<int>(sizeof...(args)) + op->EffectInputCount() +
+             op->ControlInputCount();
+  Node* call = graph()->NewNode(op, size, args_array);
+  DCHECK_EQ(0, op->ControlOutputCount());
+  current_effect_ = call;
+  return call;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_ASSEMBLER_H_
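A hedged sketch of the label workflow declared above ({gasm} is a live
GraphAssembler; {condition}, {a} and {b} are existing Node*s): create a label
with the static merge count and phi representations, route values into it with
GotoIf/Goto, then Bind it and read the merged value back through PhiAt.

  // One kTagged phi, two incoming merges.
  auto done = GraphAssembler::MakeLabel<2>(MachineRepresentation::kTagged);
  gasm.GotoIf(condition, &done, a);  // first merge: pass {a}
  gasm.Goto(&done, b);               // second merge: pass {b}
  gasm.Bind(&done);
  Node* merged = done.PhiAt(0);      // Phi(kTagged, 2) over {a} and {b}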
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
index b13b954..117e569 100644
--- a/src/compiler/graph-reducer.cc
+++ b/src/compiler/graph-reducer.cc
@@ -25,15 +25,17 @@
 
 void Reducer::Finalize() {}
 
-
 GraphReducer::GraphReducer(Zone* zone, Graph* graph, Node* dead)
     : graph_(graph),
       dead_(dead),
       state_(graph, 4),
       reducers_(zone),
       revisit_(zone),
-      stack_(zone) {}
-
+      stack_(zone) {
+  if (dead != nullptr) {
+    NodeProperties::SetType(dead_, Type::None());
+  }
+}
 
 GraphReducer::~GraphReducer() {}
 
@@ -113,17 +115,23 @@
 
   if (node->IsDead()) return Pop();  // Node was killed while on stack.
 
+  Node::Inputs node_inputs = node->inputs();
+
   // Recurse on an input if necessary.
-  int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
-  for (int i = start; i < node->InputCount(); i++) {
-    Node* input = node->InputAt(i);
-    entry.input_index = i + 1;
-    if (input != node && Recurse(input)) return;
+  int start = entry.input_index < node_inputs.count() ? entry.input_index : 0;
+  for (int i = start; i < node_inputs.count(); ++i) {
+    Node* input = node_inputs[i];
+    if (input != node && Recurse(input)) {
+      entry.input_index = i + 1;
+      return;
+    }
   }
-  for (int i = 0; i < start; i++) {
-    Node* input = node->InputAt(i);
-    entry.input_index = i + 1;
-    if (input != node && Recurse(input)) return;
+  for (int i = 0; i < start; ++i) {
+    Node* input = node_inputs[i];
+    if (input != node && Recurse(input)) {
+      entry.input_index = i + 1;
+      return;
+    }
   }
 
   // Remember the max node id before reduction.
@@ -139,10 +147,13 @@
   Node* const replacement = reduction.replacement();
   if (replacement == node) {
     // In-place update of {node}, may need to recurse on an input.
-    for (int i = 0; i < node->InputCount(); ++i) {
-      Node* input = node->InputAt(i);
-      entry.input_index = i + 1;
-      if (input != node && Recurse(input)) return;
+    Node::Inputs node_inputs = node->inputs();
+    for (int i = 0; i < node_inputs.count(); ++i) {
+      Node* input = node_inputs[i];
+      if (input != node && Recurse(input)) {
+        entry.input_index = i + 1;
+        return;
+      }
     }
   }
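The traversal above now records entry.input_index only when it actually
suspends into an input, so a node popped back off the stack resumes exactly at
the first input it has not yet descended into. A minimal sketch of that
resume-at-index pattern, with hypothetical stand-in types:

  #include <functional>
  #include <vector>

  struct Node;  // stand-in for compiler::Node

  struct Entry {
    Node* node;
    int input_index = 0;  // where to resume after returning to this entry
  };

  // Returns true if the walk suspended into an input; Recurse is a stand-in
  // for the reducer's push-if-unvisited helper.
  bool VisitInputs(Entry& entry, const std::vector<Node*>& inputs,
                   const std::function<bool(Node*)>& Recurse) {
    for (int i = entry.input_index; i < static_cast<int>(inputs.size()); ++i) {
      if (inputs[i] != entry.node && Recurse(inputs[i])) {
        entry.input_index = i + 1;  // record progress only on actual recursion
        return true;
      }
    }
    return false;  // all inputs handled
  }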
 
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
index ab20f8f..2cd10a7 100644
--- a/src/compiler/graph-visualizer.cc
+++ b/src/compiler/graph-visualizer.cc
@@ -22,6 +22,7 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/interpreter/bytecodes.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -34,9 +35,15 @@
   EmbeddedVector<char, 256> filename(0);
   std::unique_ptr<char[]> debug_name = info->GetDebugName();
   if (strlen(debug_name.get()) > 0) {
-    SNPrintF(filename, "turbo-%s", debug_name.get());
+    if (info->has_shared_info()) {
+      int attempt = info->shared_info()->opt_count();
+      SNPrintF(filename, "turbo-%s-%i", debug_name.get(), attempt);
+    } else {
+      SNPrintF(filename, "turbo-%s", debug_name.get());
+    }
   } else if (info->has_shared_info()) {
-    SNPrintF(filename, "turbo-%p", static_cast<void*>(info));
+    int attempt = info->shared_info()->opt_count();
+    SNPrintF(filename, "turbo-%p-%i", static_cast<void*>(info), attempt);
   } else {
     SNPrintF(filename, "turbo-none-%s", phase);
   }
@@ -497,7 +504,11 @@
         if (positions != nullptr) {
           SourcePosition position = positions->GetSourcePosition(node);
           if (position.IsKnown()) {
-            os_ << " pos:" << position.ScriptOffset();
+            os_ << " pos:";
+            if (position.isInlined()) {
+              os_ << "inlining(" << position.InliningId() << "),";
+            }
+            os_ << position.ScriptOffset();
           }
         }
         os_ << " <|@\n";
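For illustration (hypothetical function name and values): with these changes a
function "add" that is optimized twice no longer overwrites its own trace file,
and inlined positions are distinguishable in the dump:

  turbo-add-0    // first optimization attempt (suffix appended elsewhere)
  turbo-add-1    // second attempt, after a deoptimization
  pos:inlining(2),17   // inlined position: inlining id 2, script offset 17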
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
index 1e861c7..6fb7cfa 100644
--- a/src/compiler/graph.h
+++ b/src/compiler/graph.h
@@ -104,6 +104,59 @@
     Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9};
     return NewNode(op, arraysize(nodes), nodes);
   }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6, n7, n8, n9, n10, n11, n12, n13};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14) {
+    Node* nodes[] = {n1, n2, n3,  n4,  n5,  n6,  n7,
+                     n8, n9, n10, n11, n12, n13, n14};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15) {
+    Node* nodes[] = {n1, n2,  n3,  n4,  n5,  n6,  n7, n8,
+                     n9, n10, n11, n12, n13, n14, n15};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+                Node* n16) {
+    Node* nodes[] = {n1, n2,  n3,  n4,  n5,  n6,  n7,  n8,
+                     n9, n10, n11, n12, n13, n14, n15, n16};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6, Node* n7, Node* n8, Node* n9, Node* n10,
+                Node* n11, Node* n12, Node* n13, Node* n14, Node* n15,
+                Node* n16, Node* n17) {
+    Node* nodes[] = {n1,  n2,  n3,  n4,  n5,  n6,  n7,  n8, n9,
+                     n10, n11, n12, n13, n14, n15, n16, n17};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
 
   // Clone the {node}, and assign a new node id to the copy.
   Node* CloneNode(const Node* node);
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index 20afdc1..3696990 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -66,9 +66,7 @@
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
-        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+        RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
     }
@@ -185,10 +183,9 @@
   return instr->InputAt(index)->IsImmediate();
 }
 
-
-class OutOfLineLoadInteger final : public OutOfLineCode {
+class OutOfLineLoadZero final : public OutOfLineCode {
  public:
-  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
       : OutOfLineCode(gen), result_(result) {}
 
   void Generate() final { __ xor_(result_, result_); }
@@ -286,68 +283,423 @@
 
 }  // namespace
 
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN)      \
-  do {                                                                \
-    auto result = i.OutputDoubleRegister();                           \
-    auto offset = i.InputRegister(0);                                 \
-    if (instr->InputAt(1)->IsRegister()) {                            \
-      __ cmp(offset, i.InputRegister(1));                             \
-    } else {                                                          \
-      __ cmp(offset, i.InputImmediate(1));                            \
-    }                                                                 \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result); \
-    __ j(above_equal, ool->entry());                                  \
-    __ asm_instr(result, i.MemoryOperand(2));                         \
-    __ bind(ool->exit());                                             \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, OutOfLineLoadNaN,              \
+                                    SingleOrDouble)                           \
+  do {                                                                        \
+    auto result = i.OutputDoubleRegister();                                   \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      if (instr->InputAt(1)->IsRegister()) {                                  \
+        __ cmp(offset, i.InputRegister(1));                                   \
+      } else {                                                                \
+        __ cmp(offset, i.InputImmediate(1));                                  \
+      }                                                                       \
+      OutOfLineCode* ool = new (zone()) OutOfLineLoadNaN(this, result);       \
+      __ j(above_equal, ool->entry());                                        \
+      __ asm_instr(result, i.MemoryOperand(2));                               \
+      __ bind(ool->exit());                                                   \
+    } else {                                                                  \
+      auto index2 = i.InputInt32(0);                                          \
+      auto length = i.InputInt32(1);                                          \
+      auto index1 = i.InputRegister(2);                                       \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode(); \
+      DCHECK_LE(index2, length);                                              \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),    \
+                               rmode_length));                                \
+      class OutOfLineLoadFloat final : public OutOfLineCode {                 \
+       public:                                                                \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,            \
+                           Register buffer, Register index1, int32_t index2,  \
+                           int32_t length, RelocInfo::Mode rmode_length,      \
+                           RelocInfo::Mode rmode_buffer)                      \
+            : OutOfLineCode(gen),                                             \
+              result_(result),                                                \
+              buffer_reg_(buffer),                                            \
+              buffer_int_(0),                                                 \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,            \
+                           int32_t buffer, Register index1, int32_t index2,   \
+                           int32_t length, RelocInfo::Mode rmode_length,      \
+                           RelocInfo::Mode rmode_buffer)                      \
+            : OutOfLineCode(gen),                                             \
+              result_(result),                                                \
+              buffer_reg_({-1}),                                              \
+              buffer_int_(buffer),                                            \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        void Generate() final {                                               \
+          Label oob;                                                          \
+          __ push(index1_);                                                   \
+          __ lea(index1_, Operand(index1_, index2_));                         \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),       \
+                                    rmode_length_));                          \
+          __ j(above_equal, &oob, Label::kNear);                              \
+          if (buffer_reg_.is_valid()) {                                       \
+            __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0)); \
+          } else {                                                            \
+            __ asm_instr(result_,                                             \
+                         Operand(index1_, buffer_int_, rmode_buffer_));       \
+          }                                                                   \
+          __ pop(index1_);                                                    \
+          __ jmp(exit());                                                     \
+          __ bind(&oob);                                                      \
+          __ pop(index1_);                                                    \
+          __ xorp##SingleOrDouble(result_, result_);                          \
+          __ divs##SingleOrDouble(result_, result_);                          \
+        }                                                                     \
+                                                                              \
+       private:                                                               \
+        XMMRegister const result_;                                            \
+        Register const buffer_reg_;                                           \
+        int32_t const buffer_int_;                                            \
+        Register const index1_;                                               \
+        int32_t const index2_;                                                \
+        int32_t const length_;                                                \
+        RelocInfo::Mode rmode_length_;                                        \
+        RelocInfo::Mode rmode_buffer_;                                        \
+      };                                                                      \
+      if (instr->InputAt(3)->IsRegister()) {                                  \
+        auto buffer = i.InputRegister(3);                                     \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineLoadFloat(this, result, buffer, index1, index2, length,  \
+                               rmode_length, rmode_buffer);                   \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(result, Operand(buffer, index1, times_1, index2));       \
+        __ bind(ool->exit());                                                 \
+      } else {                                                                \
+        auto buffer = i.InputInt32(3);                                        \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineLoadFloat(this, result, buffer, index1, index2, length,  \
+                               rmode_length, rmode_buffer);                   \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer)); \
+        __ bind(ool->exit());                                                 \
+      }                                                                       \
+    }                                                                         \
   } while (false)
 
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
-  do {                                                                    \
-    auto result = i.OutputRegister();                                     \
-    auto offset = i.InputRegister(0);                                     \
-    if (instr->InputAt(1)->IsRegister()) {                                \
-      __ cmp(offset, i.InputRegister(1));                                 \
-    } else {                                                              \
-      __ cmp(offset, i.InputImmediate(1));                                \
-    }                                                                     \
-    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
-    __ j(above_equal, ool->entry());                                      \
-    __ asm_instr(result, i.MemoryOperand(2));                             \
-    __ bind(ool->exit());                                                 \
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      if (instr->InputAt(1)->IsRegister()) {                                   \
+        __ cmp(offset, i.InputRegister(1));                                    \
+      } else {                                                                 \
+        __ cmp(offset, i.InputImmediate(1));                                   \
+      }                                                                        \
+      OutOfLineCode* ool = new (zone()) OutOfLineLoadZero(this, result);       \
+      __ j(above_equal, ool->entry());                                         \
+      __ asm_instr(result, i.MemoryOperand(2));                                \
+      __ bind(ool->exit());                                                    \
+    } else {                                                                   \
+      auto index2 = i.InputInt32(0);                                           \
+      auto length = i.InputInt32(1);                                           \
+      auto index1 = i.InputRegister(2);                                        \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode();  \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(3)).rmode();  \
+      DCHECK_LE(index2, length);                                               \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),     \
+                               rmode_length));                                 \
+      class OutOfLineLoadInteger final : public OutOfLineCode {                \
+       public:                                                                 \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             Register buffer, Register index1, int32_t index2, \
+                             int32_t length, RelocInfo::Mode rmode_length,     \
+                             RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_reg_(buffer),                                             \
+              buffer_int_(0),                                                  \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             int32_t buffer, Register index1, int32_t index2,  \
+                             int32_t length, RelocInfo::Mode rmode_length,     \
+                             RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_reg_({-1}),                                               \
+              buffer_int_(buffer),                                             \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        void Generate() final {                                                \
+          Label oob;                                                           \
+          bool need_cache = !result_.is(index1_);                              \
+          if (need_cache) __ push(index1_);                                    \
+          __ lea(index1_, Operand(index1_, index2_));                          \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),        \
+                                    rmode_length_));                           \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          if (buffer_reg_.is_valid()) {                                        \
+            __ asm_instr(result_, Operand(buffer_reg_, index1_, times_1, 0));  \
+          } else {                                                             \
+            __ asm_instr(result_,                                              \
+                         Operand(index1_, buffer_int_, rmode_buffer_));        \
+          }                                                                    \
+          if (need_cache) __ pop(index1_);                                     \
+          __ jmp(exit());                                                      \
+          __ bind(&oob);                                                       \
+          if (need_cache) __ pop(index1_);                                     \
+          __ xor_(result_, result_);                                           \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const result_;                                                \
+        Register const buffer_reg_;                                            \
+        int32_t const buffer_int_;                                             \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        RelocInfo::Mode rmode_length_;                                         \
+        RelocInfo::Mode rmode_buffer_;                                         \
+      };                                                                       \
+      if (instr->InputAt(3)->IsRegister()) {                                   \
+        auto buffer = i.InputRegister(3);                                      \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+                                 rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(result, Operand(buffer, index1, times_1, index2));        \
+        __ bind(ool->exit());                                                  \
+      } else {                                                                 \
+        auto buffer = i.InputInt32(3);                                         \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineLoadInteger(this, result, buffer, index1, index2, length, \
+                                 rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(result, Operand(index1, buffer + index2, rmode_buffer));  \
+        __ bind(ool->exit());                                                  \
+      }                                                                        \
+    }                                                                          \
   } while (false)
 
-
-#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
-  do {                                                          \
-    auto offset = i.InputRegister(0);                           \
-    if (instr->InputAt(1)->IsRegister()) {                      \
-      __ cmp(offset, i.InputRegister(1));                       \
-    } else {                                                    \
-      __ cmp(offset, i.InputImmediate(1));                      \
-    }                                                           \
-    Label done;                                                 \
-    __ j(above_equal, &done, Label::kNear);                     \
-    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
-    __ bind(&done);                                             \
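+// The checked stores now come in two shapes: a plain register offset that is
+// compared against the length in line, and a constant index folded into the
+// compare, where an out-of-line class re-checks the combined index on the
+// slow path.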
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                               \
+  do {                                                                        \
+    auto value = i.InputDoubleRegister(2);                                    \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      if (instr->InputAt(1)->IsRegister()) {                                  \
+        __ cmp(offset, i.InputRegister(1));                                   \
+      } else {                                                                \
+        __ cmp(offset, i.InputImmediate(1));                                  \
+      }                                                                       \
+      Label done;                                                             \
+      __ j(above_equal, &done, Label::kNear);                                 \
+      __ asm_instr(i.MemoryOperand(3), value);                                \
+      __ bind(&done);                                                         \
+    } else {                                                                  \
+      auto index2 = i.InputInt32(0);                                          \
+      auto length = i.InputInt32(1);                                          \
+      auto index1 = i.InputRegister(3);                                       \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode(); \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode(); \
+      DCHECK_LE(index2, length);                                              \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),    \
+                               rmode_length));                                \
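+      /* Slow path: redo the bounds check with the combined index and        \
+         perform the store only when it is in bounds; out-of-bounds          \
+         stores are silently dropped. */                                     \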
+      class OutOfLineStoreFloat final : public OutOfLineCode {                \
+       public:                                                                \
+        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,              \
+                            Register index1, int32_t index2, int32_t length,  \
+                            XMMRegister value, RelocInfo::Mode rmode_length,  \
+                            RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                             \
+              buffer_reg_(buffer),                                            \
+              buffer_int_(0),                                                 \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              value_(value),                                                  \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        OutOfLineStoreFloat(CodeGenerator* gen, int32_t buffer,               \
+                            Register index1, int32_t index2, int32_t length,  \
+                            XMMRegister value, RelocInfo::Mode rmode_length,  \
+                            RelocInfo::Mode rmode_buffer)                     \
+            : OutOfLineCode(gen),                                             \
+              buffer_reg_({-1}),                                              \
+              buffer_int_(buffer),                                            \
+              index1_(index1),                                                \
+              index2_(index2),                                                \
+              length_(length),                                                \
+              value_(value),                                                  \
+              rmode_length_(rmode_length),                                    \
+              rmode_buffer_(rmode_buffer) {}                                  \
+                                                                              \
+        void Generate() final {                                               \
+          Label oob;                                                          \
+          __ push(index1_);                                                   \
+          __ lea(index1_, Operand(index1_, index2_));                         \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),       \
+                                    rmode_length_));                          \
+          __ j(above_equal, &oob, Label::kNear);                              \
+          if (buffer_reg_.is_valid()) {                                       \
+            __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_);  \
+          } else {                                                            \
+            __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_),        \
+                         value_);                                             \
+          }                                                                   \
+          __ bind(&oob);                                                      \
+          __ pop(index1_);                                                    \
+        }                                                                     \
+                                                                              \
+       private:                                                               \
+        Register const buffer_reg_;                                           \
+        int32_t const buffer_int_;                                            \
+        Register const index1_;                                               \
+        int32_t const index2_;                                                \
+        int32_t const length_;                                                \
+        XMMRegister const value_;                                             \
+        RelocInfo::Mode rmode_length_;                                        \
+        RelocInfo::Mode rmode_buffer_;                                        \
+      };                                                                      \
+      if (instr->InputAt(4)->IsRegister()) {                                  \
+        auto buffer = i.InputRegister(4);                                     \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineStoreFloat(this, buffer, index1, index2, length, value,  \
+                                rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(Operand(buffer, index1, times_1, index2), value);        \
+        __ bind(ool->exit());                                                 \
+      } else {                                                                \
+        auto buffer = i.InputInt32(4);                                        \
+        OutOfLineCode* ool = new (zone())                                     \
+            OutOfLineStoreFloat(this, buffer, index1, index2, length, value,  \
+                                rmode_length, rmode_buffer);                  \
+        __ j(above_equal, ool->entry());                                      \
+        __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value);  \
+        __ bind(ool->exit());                                                 \
+      }                                                                       \
+    }                                                                         \
   } while (false)
 
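+// Note: `value` is deliberately not a parameter of this macro. The
+// ASSEMBLE_CHECKED_STORE_INTEGER wrapper below declares it and instantiates
+// this body once with Value = Register and once with Value = Immediate.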
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
+  do {                                                                         \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      if (instr->InputAt(1)->IsRegister()) {                                   \
+        __ cmp(offset, i.InputRegister(1));                                    \
+      } else {                                                                 \
+        __ cmp(offset, i.InputImmediate(1));                                   \
+      }                                                                        \
+      Label done;                                                              \
+      __ j(above_equal, &done, Label::kNear);                                  \
+      __ asm_instr(i.MemoryOperand(3), value);                                 \
+      __ bind(&done);                                                          \
+    } else {                                                                   \
+      auto index2 = i.InputInt32(0);                                           \
+      auto length = i.InputInt32(1);                                           \
+      auto index1 = i.InputRegister(3);                                        \
+      RelocInfo::Mode rmode_length = i.ToConstant(instr->InputAt(1)).rmode();  \
+      RelocInfo::Mode rmode_buffer = i.ToConstant(instr->InputAt(4)).rmode();  \
+      DCHECK_LE(index2, length);                                               \
+      __ cmp(index1, Immediate(reinterpret_cast<Address>(length - index2),     \
+                               rmode_length));                                 \
+      class OutOfLineStoreInteger final : public OutOfLineCode {               \
+       public:                                                                 \
+        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value, RelocInfo::Mode rmode_length,       \
+                              RelocInfo::Mode rmode_buffer)                    \
+            : OutOfLineCode(gen),                                              \
+              buffer_reg_(buffer),                                             \
+              buffer_int_(0),                                                  \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value),                                                   \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        OutOfLineStoreInteger(CodeGenerator* gen, int32_t buffer,              \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value, RelocInfo::Mode rmode_length,       \
+                              RelocInfo::Mode rmode_buffer)                    \
+            : OutOfLineCode(gen),                                              \
+              buffer_reg_({-1}),                                               \
+              buffer_int_(buffer),                                             \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value),                                                   \
+              rmode_length_(rmode_length),                                     \
+              rmode_buffer_(rmode_buffer) {}                                   \
+                                                                               \
+        void Generate() final {                                                \
+          Label oob;                                                           \
+          __ push(index1_);                                                    \
+          __ lea(index1_, Operand(index1_, index2_));                          \
+          __ cmp(index1_, Immediate(reinterpret_cast<Address>(length_),        \
+                                    rmode_length_));                           \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          if (buffer_reg_.is_valid()) {                                        \
+            __ asm_instr(Operand(buffer_reg_, index1_, times_1, 0), value_);   \
+          } else {                                                             \
+            __ asm_instr(Operand(index1_, buffer_int_, rmode_buffer_),         \
+                         value_);                                              \
+          }                                                                    \
+          __ bind(&oob);                                                       \
+          __ pop(index1_);                                                     \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const buffer_reg_;                                            \
+        int32_t const buffer_int_;                                             \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        Value const value_;                                                    \
+        RelocInfo::Mode rmode_length_;                                         \
+        RelocInfo::Mode rmode_buffer_;                                         \
+      };                                                                       \
+      if (instr->InputAt(4)->IsRegister()) {                                   \
+        auto buffer = i.InputRegister(4);                                      \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+                                  rmode_length, rmode_buffer);                 \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+        __ bind(ool->exit());                                                  \
+      } else {                                                                 \
+        auto buffer = i.InputInt32(4);                                         \
+        OutOfLineCode* ool = new (zone())                                      \
+            OutOfLineStoreInteger(this, buffer, index1, index2, length, value, \
+                                  rmode_length, rmode_buffer);                 \
+        __ j(above_equal, ool->entry());                                       \
+        __ asm_instr(Operand(index1, buffer + index2, rmode_buffer), value);   \
+        __ bind(ool->exit());                                                  \
+      }                                                                        \
+    }                                                                          \
+  } while (false)
 
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
-  do {                                                       \
-    auto offset = i.InputRegister(0);                        \
-    if (instr->InputAt(1)->IsRegister()) {                   \
-      __ cmp(offset, i.InputRegister(1));                    \
-    } else {                                                 \
-      __ cmp(offset, i.InputImmediate(1));                   \
-    }                                                        \
-    Label done;                                              \
-    __ j(above_equal, &done, Label::kNear);                  \
-    if (instr->InputAt(2)->IsRegister()) {                   \
-      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
-    } else {                                                 \
-      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
-    }                                                        \
-    __ bind(&done);                                          \
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
+  do {                                                           \
+    if (instr->InputAt(2)->IsRegister()) {                       \
+      Register value = i.InputRegister(2);                       \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
+    } else {                                                     \
+      Immediate value = i.InputImmediate(2);                     \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+    }                                                            \
   } while (false)
 
 #define ASSEMBLE_COMPARE(asm_instr)                                   \
@@ -434,7 +786,7 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
-         Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+         Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &done, Label::kNear);
 
   __ push(scratch1);
@@ -641,10 +993,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -896,10 +1246,10 @@
       } else {
         __ add(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
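+      // Apply the carry only after the high input has been moved into the
+      // output register, so adc updates the register that is actually
+      // returned.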
+      __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -921,10 +1271,10 @@
       } else {
         __ sub(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
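+      // Same as for the add pair: move first, then subtract-with-borrow in
+      // place on the output register.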
+      __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -1512,7 +1862,12 @@
       }
       break;
     case kIA32Push:
-      if (instr->InputAt(0)->IsFPRegister()) {
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
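+        // An addressing mode on the push means the input is in memory, so it
+        // can be pushed directly from its memory operand.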
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ push(operand);
+        frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
+      } else if (instr->InputAt(0)->IsFPRegister()) {
         __ sub(esp, Immediate(kFloatSize));
         __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
         frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
@@ -1567,10 +1922,10 @@
       ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
       break;
     case kCheckedLoadFloat32:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss, OutOfLineLoadFloat32NaN, s);
       break;
     case kCheckedLoadFloat64:
-      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN);
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd, OutOfLineLoadFloat64NaN, d);
       break;
     case kCheckedStoreWord8:
       ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
@@ -1611,61 +1966,66 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
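+// Maps an architecture-independent FlagsCondition to the corresponding ia32
+// condition code. The unordered (NaN) cases map to the ordered conditions;
+// callers handle NaN separately with an explicit parity check.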
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
 
 // Assembles a branch after an instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  IA32OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
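+  // Unordered comparisons (a NaN operand sets the parity flag) are resolved
+  // first: unordered-equal branches to false, unordered-not-equal to true.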
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
 }
@@ -1675,6 +2035,73 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
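+// Assembles a conditional trap. The actual trap is emitted out of line: it
+// either calls the trap builtin or, when no runtime is available in tests
+// (trap_id == Builtins::builtin_count), a C callback.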
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      IA32OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0, esi);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ ud2();
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1687,58 +2114,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ Move(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
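+  // NaN operands (parity set) short-circuit the materialization: an
+  // unordered-equal comparison yields 0, an unordered-not-equal yields 1.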
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ Move(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ mov(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
+  Condition cc = FlagsConditionToCondition(condition);
+
   __ bind(&check);
   if (reg.is_byte_register()) {
     // setcc for byte registers (al, bl, cl, dl).
@@ -1783,13 +2169,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
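+  // The bailout type is derived from the deoptimization kind recorded during
+  // instruction selection: soft deopts use the SOFT entry, all others EAGER.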
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2082,7 +2471,7 @@
       __ Move(dst, g.ToImmediate(source));
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
-      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+      uint32_t src = src_constant.ToFloat32AsInt();
       if (destination->IsFPRegister()) {
         XMMRegister dst = g.ToDoubleRegister(destination);
         __ Move(dst, src);
@@ -2093,7 +2482,7 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+      uint64_t src = src_constant.ToFloat64AsInt();
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
       if (destination->IsFPRegister()) {
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index c827c68..a5f72c7 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -234,6 +234,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -324,6 +327,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -351,6 +357,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -386,10 +397,37 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
   }
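+  // If the offset is index + constant and the constant is provably no larger
+  // than the (possibly relocatable) length, emit the constant-index shape so
+  // the bounds check and memory operand fold the constant.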
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32BinopMatcher moffset(offset);
+    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+                                            ? g.UseImmediate(buffer)
+                                            : g.UseRegister(buffer);
+    Int32Matcher mlength(length);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+    if (mmlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mmlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+  }
   InstructionOperand offset_operand = g.UseRegister(offset);
   InstructionOperand length_operand =
       g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -435,6 +473,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -445,6 +486,30 @@
                                   rep == MachineRepresentation::kBit)
                                      ? g.UseByteRegister(value)
                                      : g.UseRegister(value));
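+  // Same index + constant matching as for the checked loads above.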
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32BinopMatcher moffset(offset);
+    InstructionOperand buffer_operand = g.CanBeImmediate(buffer)
+                                            ? g.UseImmediate(buffer)
+                                            : g.UseRegister(buffer);
+    Int32Matcher mlength(length);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+           g.UseImmediate(length), value_operand,
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+    IntMatcher<int32_t, IrOpcode::kRelocatableInt32Constant> mmlength(length);
+    if (mmlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mmlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.NoOutput(), g.UseImmediate(moffset.right().node()),
+           g.UseImmediate(length), value_operand,
+           g.UseRegister(moffset.left().node()), buffer_operand);
+      return;
+    }
+  }
   InstructionOperand offset_operand = g.UseRegister(offset);
   InstructionOperand length_operand =
       g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
@@ -515,7 +580,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -766,18 +831,83 @@
   VisitShift(this, node, kIA32Ror);
 }
 
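+// Tables of operations whose visitors differ only in the opcode; each
+// V(Name, ...) entry expands into an InstructionSelector::Visit##Name
+// definition below, replacing the hand-written visitors removed here.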
+#define RO_OP_LIST(V)                                     \
+  V(Word32Clz, kIA32Lzcnt)                                \
+  V(Word32Ctz, kIA32Tzcnt)                                \
+  V(Word32Popcnt, kIA32Popcnt)                            \
+  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)         \
+  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)              \
+  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)             \
+  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)           \
+  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)           \
+  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)         \
+  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)             \
+  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32)           \
+  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32)         \
+  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)       \
+  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)              \
+  V(BitcastFloat32ToInt32, kIA32BitcastFI)                \
+  V(BitcastInt32ToFloat32, kIA32BitcastIF)                \
+  V(Float32Sqrt, kSSEFloat32Sqrt)                         \
+  V(Float64Sqrt, kSSEFloat64Sqrt)                         \
+  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32) \
+  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
 
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define RR_OP_LIST(V)                                                         \
+  V(TruncateFloat64ToWord32, kArchTruncateDoubleToI)                          \
+  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
+  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
+  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
+  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
+  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+  V(Float32RoundTiesEven,                                                     \
+    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
+  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
 
+#define RRO_FLOAT_OP_LIST(V)                    \
+  V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
+  V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
+  V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
+  V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
+  V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
+  V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
+  V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
+  V(Float64Div, kAVXFloat64Div, kSSEFloat64Div)
 
-void InstructionSelector::VisitWord32Ctz(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
+#define FLOAT_UNOP_LIST(V)                      \
+  V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
+  V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
+  V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
+  V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg)
 
+#define RO_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRO(this, node, opcode);                      \
+  }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
+
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, node, opcode);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
+
+#define RRO_FLOAT_VISITOR(Name, avx, sse)             \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRROFloat(this, node, avx, sse);              \
+  }
+RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
+#undef RRO_FLOAT_VISITOR
+
+#define FLOAT_UNOP_VISITOR(Name, avx, sse)                  \
+  void InstructionSelector::Visit##Name(Node* node) {       \
+    VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+  }
+FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
+#undef FLOAT_UNOP_VISITOR
 
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
@@ -785,12 +915,6 @@
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   IA32OperandGenerator g(this);
 
@@ -885,16 +1009,6 @@
 }
 
 
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToFloat64);
-}
-
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEInt32ToFloat32);
-}
-
-
 void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
   IA32OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
@@ -902,103 +1016,6 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEInt32ToFloat64);
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  VisitRO(this, node, kSSEUint32ToFloat64);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToInt32);
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat32ToUint32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToUint32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToFloat32);
-}
-
-void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
-  VisitRR(this, node, kArchTruncateDoubleToI);
-}
-
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kIA32BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat32Add(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Add, kSSEFloat32Add);
-}
-
-
-void InstructionSelector::VisitFloat64Add(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Add, kSSEFloat64Add);
-}
-
-
-void InstructionSelector::VisitFloat32Sub(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Sub, kSSEFloat32Sub);
-}
-
-void InstructionSelector::VisitFloat64Sub(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Sub, kSSEFloat64Sub);
-}
-
-void InstructionSelector::VisitFloat32Mul(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Mul, kSSEFloat32Mul);
-}
-
-
-void InstructionSelector::VisitFloat64Mul(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Mul, kSSEFloat64Mul);
-}
-
-
-void InstructionSelector::VisitFloat32Div(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat32Div, kSSEFloat32Div);
-}
-
-
-void InstructionSelector::VisitFloat64Div(Node* node) {
-  VisitRROFloat(this, node, kAVXFloat64Div, kSSEFloat64Div);
-}
-
-
 void InstructionSelector::VisitFloat64Mod(Node* node) {
   IA32OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister(eax)};
@@ -1039,80 +1056,10 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitFloat32Abs(Node* node) {
-  IA32OperandGenerator g(this);
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
-}
-
-
-void InstructionSelector::VisitFloat64Abs(Node* node) {
-  IA32OperandGenerator g(this);
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
-}
-
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
-
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
   UNREACHABLE();
 }
 
-
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
-void InstructionSelector::VisitFloat32Neg(Node* node) {
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
-}
-
-void InstructionSelector::VisitFloat64Neg(Node* node) {
-  VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
-}
-
 void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
                                                    InstructionCode opcode) {
   IA32OperandGenerator g(this);
@@ -1154,22 +1101,35 @@
     }
   } else {
     // Push any stack arguments.
+    int effect_level = GetEffectLevel(node);
     for (PushParameter input : base::Reversed(*arguments)) {
       // Skip any alignment holes in pushed nodes.
+      Node* input_node = input.node();
       if (input.node() == nullptr) continue;
-      InstructionOperand value =
-          g.CanBeImmediate(input.node())
-              ? g.UseImmediate(input.node())
-              : IsSupported(ATOM) ||
-                        sequence()->IsFP(GetVirtualRegister(input.node()))
-                    ? g.UseRegister(input.node())
-                    : g.Use(input.node());
-      if (input.type() == MachineType::Float32()) {
-        Emit(kIA32PushFloat32, g.NoOutput(), value);
-      } else if (input.type() == MachineType::Float64()) {
-        Emit(kIA32PushFloat64, g.NoOutput(), value);
+      if (g.CanBeMemoryOperand(kIA32Push, node, input_node, effect_level)) {
+        InstructionOperand outputs[1];
+        InstructionOperand inputs[4];
+        size_t input_count = 0;
+        InstructionCode opcode = kIA32Push;
+        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+            input_node, inputs, &input_count);
+        opcode |= AddressingModeField::encode(mode);
+        Emit(opcode, 0, outputs, input_count, inputs);
       } else {
-        Emit(kIA32Push, g.NoOutput(), value);
+        InstructionOperand value =
+            g.CanBeImmediate(input.node())
+                ? g.UseImmediate(input.node())
+                : IsSupported(ATOM) ||
+                          sequence()->IsFP(GetVirtualRegister(input.node()))
+                      ? g.UseRegister(input.node())
+                      : g.Use(input.node());
+        if (input.type() == MachineType::Float32()) {
+          Emit(kIA32PushFloat32, g.NoOutput(), value);
+        } else if (input.type() == MachineType::Float64()) {
+          Emit(kIA32PushFloat64, g.NoOutput(), value);
+        } else {
+          Emit(kIA32Push, g.NoOutput(), value);
+        }
       }
     }
   }
@@ -1202,11 +1162,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1220,11 +1183,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1240,21 +1206,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
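+// If `node` is an integer constant whose value fits the load representation
+// of `hint_node`, returns that representation. Otherwise returns `node`'s own
+// load representation if it is a load, or MachineType::None().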
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1332,10 +1331,8 @@
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
-      // TODO(epertoso): we should use `narrowed_opcode' here once we match
-      // immediates too.
-      return VisitCompareWithMemoryOperand(selector, opcode, left,
+    if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+      return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
                                            g.UseImmediate(right), cont);
     }
     return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1352,11 +1349,6 @@
         cont);
   }
 
-  if (g.CanBeBetterLeftOperand(right)) {
-    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    std::swap(left, right);
-  }
-
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1377,8 +1369,8 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1490,14 +1482,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
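// A minimal sketch of how a trap continuation flows through the emitters
// above, e.g. for a wasm bounds check: a TrapIf node carrying some
// Runtime::FunctionId func_id builds
//
//   FlagsContinuation cont =
//       FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
//
// and the compare emitters then append g.UseImmediate(cont->trap_id()) as a
// trailing input with no output register, so the code generator can
// materialize an out-of-line trap call instead of a branch or a setcc.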
 
@@ -1633,19 +1640,6 @@
 }
 
 
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  IA32OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
 
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   IA32OperandGenerator g(this);
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 6242e98..360069c 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -152,7 +152,8 @@
   kFlags_none = 0,
   kFlags_branch = 1,
   kFlags_deoptimize = 2,
-  kFlags_set = 3
+  kFlags_set = 3,
+  kFlags_trap = 4
 };
 
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -205,11 +206,11 @@
 // for code generation. We encode the instruction, addressing mode, and flags
 // continuation into a single InstructionCode which is stored as part of
 // the instruction.
-typedef BitField<ArchOpcode, 0, 8> ArchOpcodeField;
-typedef BitField<AddressingMode, 8, 5> AddressingModeField;
-typedef BitField<FlagsMode, 13, 2> FlagsModeField;
-typedef BitField<FlagsCondition, 15, 5> FlagsConditionField;
-typedef BitField<int, 20, 12> MiscField;
+typedef BitField<ArchOpcode, 0, 9> ArchOpcodeField;
+typedef BitField<AddressingMode, 9, 5> AddressingModeField;
+typedef BitField<FlagsMode, 14, 3> FlagsModeField;
+typedef BitField<FlagsCondition, 17, 5> FlagsConditionField;
+typedef BitField<int, 22, 10> MiscField;
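// The widened fields still pack into a single 32-bit InstructionCode:
// 9 + 5 + 3 + 5 + 10 = 32 bits. ArchOpcodeField grows to 9 bits (room for up
// to 512 opcodes), and FlagsModeField needs 3 bits because kFlags_trap (= 4)
// no longer fits in the old 2-bit field. A minimal encode/decode sketch:
//
//   InstructionCode code = ArchOpcodeField::encode(kArchNop) |
//                          FlagsModeField::encode(kFlags_trap);
//   DCHECK_EQ(kFlags_trap, FlagsModeField::decode(code));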
 
 }  // namespace compiler
 }  // namespace internal
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
index 6cb87ea..ecda453 100644
--- a/src/compiler/instruction-selector-impl.h
+++ b/src/compiler/instruction-selector-impl.h
@@ -5,8 +5,8 @@
 #ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 #define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 
-#include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/instruction.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/schedule.h"
 #include "src/macro-assembler.h"
@@ -182,6 +182,21 @@
                               sequence()->NextVirtualRegister());
   }
 
+  int AllocateVirtualRegister() { return sequence()->NextVirtualRegister(); }
+
+  InstructionOperand DefineSameAsFirstForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT, vreg);
+  }
+
+  InstructionOperand DefineAsRegistertForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER, vreg);
+  }
+
+  InstructionOperand UseRegisterForVreg(int vreg) {
+    return UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                              UnallocatedOperand::USED_AT_START, vreg);
+  }
+
   InstructionOperand TempDoubleRegister() {
     UnallocatedOperand op = UnallocatedOperand(
         UnallocatedOperand::MUST_HAVE_REGISTER,
@@ -335,9 +350,10 @@
 
   // Creates a new flags continuation for an eager deoptimization exit.
   static FlagsContinuation ForDeoptimize(FlagsCondition condition,
+                                         DeoptimizeKind kind,
                                          DeoptimizeReason reason,
                                          Node* frame_state) {
-    return FlagsContinuation(condition, reason, frame_state);
+    return FlagsContinuation(condition, kind, reason, frame_state);
   }
 
   // Creates a new flags continuation for a boolean value.
@@ -345,14 +361,25 @@
     return FlagsContinuation(condition, result);
   }
 
+  // Creates a new flags continuation for a wasm trap.
+  static FlagsContinuation ForTrap(FlagsCondition condition,
+                                   Runtime::FunctionId trap_id, Node* result) {
+    return FlagsContinuation(condition, trap_id, result);
+  }
+
   bool IsNone() const { return mode_ == kFlags_none; }
   bool IsBranch() const { return mode_ == kFlags_branch; }
   bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
   bool IsSet() const { return mode_ == kFlags_set; }
+  bool IsTrap() const { return mode_ == kFlags_trap; }
   FlagsCondition condition() const {
     DCHECK(!IsNone());
     return condition_;
   }
+  DeoptimizeKind kind() const {
+    DCHECK(IsDeoptimize());
+    return kind_;
+  }
   DeoptimizeReason reason() const {
     DCHECK(IsDeoptimize());
     return reason_;
@@ -365,6 +392,10 @@
     DCHECK(IsSet());
     return frame_state_or_result_;
   }
+  Runtime::FunctionId trap_id() const {
+    DCHECK(IsTrap());
+    return trap_id_;
+  }
   BasicBlock* true_block() const {
     DCHECK(IsBranch());
     return true_block_;
@@ -422,10 +453,11 @@
   }
 
  private:
-  FlagsContinuation(FlagsCondition condition, DeoptimizeReason reason,
-                    Node* frame_state)
+  FlagsContinuation(FlagsCondition condition, DeoptimizeKind kind,
+                    DeoptimizeReason reason, Node* frame_state)
       : mode_(kFlags_deoptimize),
         condition_(condition),
+        kind_(kind),
         reason_(reason),
         frame_state_or_result_(frame_state) {
     DCHECK_NOT_NULL(frame_state);
@@ -437,13 +469,24 @@
     DCHECK_NOT_NULL(result);
   }
 
+  FlagsContinuation(FlagsCondition condition, Runtime::FunctionId trap_id,
+                    Node* result)
+      : mode_(kFlags_trap),
+        condition_(condition),
+        frame_state_or_result_(result),
+        trap_id_(trap_id) {
+    DCHECK_NOT_NULL(result);
+  }
+
   FlagsMode const mode_;
   FlagsCondition condition_;
-  DeoptimizeReason reason_;      // Only value if mode_ == kFlags_deoptimize
+  DeoptimizeKind kind_;          // Only valid if mode_ == kFlags_deoptimize
+  DeoptimizeReason reason_;      // Only valid if mode_ == kFlags_deoptimize
   Node* frame_state_or_result_;  // Only valid if mode_ == kFlags_deoptimize
                                  // or mode_ == kFlags_set.
   BasicBlock* true_block_;       // Only valid if mode_ == kFlags_branch.
   BasicBlock* false_block_;      // Only valid if mode_ == kFlags_branch.
+  Runtime::FunctionId trap_id_;  // Only valid if mode_ == kFlags_trap.
 };
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
index 8f899f3..57b6028 100644
--- a/src/compiler/instruction-selector.cc
+++ b/src/compiler/instruction-selector.cc
@@ -14,6 +14,7 @@
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
 #include "src/deoptimizer.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -127,7 +128,6 @@
   }
 }
 
-
 Instruction* InstructionSelector::Emit(InstructionCode opcode,
                                        InstructionOperand output,
                                        size_t temp_count,
@@ -414,13 +414,10 @@
   sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
 }
 
-
 namespace {
 
-enum class FrameStateInputKind { kAny, kStackSlot };
-
-InstructionOperand OperandForDeopt(OperandGenerator* g, Node* input,
-                                   FrameStateInputKind kind,
+InstructionOperand OperandForDeopt(Isolate* isolate, OperandGenerator* g,
+                                   Node* input, FrameStateInputKind kind,
                                    MachineRepresentation rep) {
   if (rep == MachineRepresentation::kNone) {
     return g->TempImmediate(FrameStateDescriptor::kImpossibleValue);
@@ -432,8 +429,31 @@
     case IrOpcode::kNumberConstant:
     case IrOpcode::kFloat32Constant:
     case IrOpcode::kFloat64Constant:
-    case IrOpcode::kHeapConstant:
       return g->UseImmediate(input);
+    case IrOpcode::kHeapConstant: {
+      if (!CanBeTaggedPointer(rep)) {
+        // If we have inconsistent static and dynamic types, e.g. if we
+        // smi-check a string, we can get here with a heap object even
+        // though the expected representation says it is a smi. In that
+        // case, we return an invalid instruction operand, which will be
+        // interpreted as an optimized-out value.
+
+        // TODO(jarin) Ideally, we should turn the current instruction
+        // into an abort (we should never execute it).
+        return InstructionOperand();
+      }
+
+      Handle<HeapObject> constant = OpParameter<Handle<HeapObject>>(input);
+      Heap::RootListIndex root_index;
+      if (isolate->heap()->IsRootHandle(constant, &root_index) &&
+          root_index == Heap::kOptimizedOutRootIndex) {
+        // For an optimized-out object we return an invalid instruction
+        // operand, so that we take the fast path for optimized-out values.
+        return InstructionOperand();
+      }
+
+      return g->UseImmediate(input);
+    }
+    case IrOpcode::kArgumentsObjectState:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedObjectState:
       UNREACHABLE();
@@ -452,6 +472,7 @@
   return InstructionOperand();
 }
 
+}  // namespace
 
 class StateObjectDeduplicator {
  public:
@@ -477,15 +498,21 @@
   ZoneVector<Node*> objects_;
 };
 
-
 // Returns the number of instruction operands added to inputs.
-size_t AddOperandToStateValueDescriptor(StateValueDescriptor* descriptor,
-                                        InstructionOperandVector* inputs,
-                                        OperandGenerator* g,
-                                        StateObjectDeduplicator* deduplicator,
-                                        Node* input, MachineType type,
-                                        FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddOperandToStateValueDescriptor(
+    StateValueList* values, InstructionOperandVector* inputs,
+    OperandGenerator* g, StateObjectDeduplicator* deduplicator, Node* input,
+    MachineType type, FrameStateInputKind kind, Zone* zone) {
+  if (input == nullptr) {
+    values->PushOptimizedOut();
+    return 0;
+  }
+
   switch (input->opcode()) {
+    case IrOpcode::kArgumentsObjectState: {
+      values->PushArguments();
+      return 0;
+    }
     case IrOpcode::kObjectState: {
       UNREACHABLE();
       return 0;
@@ -495,41 +522,45 @@
       if (id == StateObjectDeduplicator::kNotDuplicated) {
         size_t entries = 0;
         id = deduplicator->InsertObject(input);
-        descriptor->fields().push_back(
-            StateValueDescriptor::Recursive(zone, id));
-        StateValueDescriptor* new_desc = &descriptor->fields().back();
+        StateValueList* nested = values->PushRecursiveField(zone, id);
         int const input_count = input->op()->ValueInputCount();
         ZoneVector<MachineType> const* types = MachineTypesOf(input->op());
         for (int i = 0; i < input_count; ++i) {
           entries += AddOperandToStateValueDescriptor(
-              new_desc, inputs, g, deduplicator, input->InputAt(i),
-              types->at(i), kind, zone);
+              nested, inputs, g, deduplicator, input->InputAt(i), types->at(i),
+              kind, zone);
         }
         return entries;
       } else {
         // Crankshaft counts duplicate objects for the running id, so we have
         // to push the input again.
         deduplicator->InsertObject(input);
-        descriptor->fields().push_back(
-            StateValueDescriptor::Duplicate(zone, id));
+        values->PushDuplicate(id);
         return 0;
       }
     }
     default: {
-      inputs->push_back(OperandForDeopt(g, input, kind, type.representation()));
-      descriptor->fields().push_back(StateValueDescriptor::Plain(zone, type));
-      return 1;
+      InstructionOperand op =
+          OperandForDeopt(isolate(), g, input, kind, type.representation());
+      if (op.kind() == InstructionOperand::INVALID) {
+        // Invalid operand means the value is impossible or optimized-out.
+        values->PushOptimizedOut();
+        return 0;
+      } else {
+        inputs->push_back(op);
+        values->PushPlain(type);
+        return 1;
+      }
     }
   }
 }
 
 
 // Returns the number of instruction operands added to inputs.
-size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
-                                       Node* state, OperandGenerator* g,
-                                       StateObjectDeduplicator* deduplicator,
-                                       InstructionOperandVector* inputs,
-                                       FrameStateInputKind kind, Zone* zone) {
+size_t InstructionSelector::AddInputsToFrameStateDescriptor(
+    FrameStateDescriptor* descriptor, Node* state, OperandGenerator* g,
+    StateObjectDeduplicator* deduplicator, InstructionOperandVector* inputs,
+    FrameStateInputKind kind, Zone* zone) {
   DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
 
   size_t entries = 0;
@@ -553,8 +584,12 @@
   DCHECK_EQ(descriptor->locals_count(), StateValuesAccess(locals).size());
   DCHECK_EQ(descriptor->stack_count(), StateValuesAccess(stack).size());
 
-  StateValueDescriptor* values_descriptor =
-      descriptor->GetStateValueDescriptor();
+  StateValueList* values_descriptor = descriptor->GetStateValueDescriptors();
+
+  DCHECK_EQ(values_descriptor->size(), 0u);
+  values_descriptor->ReserveSize(
+      descriptor->GetSize(OutputFrameStateCombine::Ignore()));
+
   entries += AddOperandToStateValueDescriptor(
       values_descriptor, inputs, g, deduplicator, function,
       MachineType::AnyTagged(), FrameStateInputKind::kStackSlot, zone);
@@ -583,8 +618,6 @@
   return entries;
 }
 
-}  // namespace
-
 
 // An internal helper class for generating the operands to calls.
 // TODO(bmeurer): Get rid of the CallBuffer business and make
@@ -733,7 +766,8 @@
     }
 
     int const state_id = sequence()->AddDeoptimizationEntry(
-        buffer->frame_state_descriptor, DeoptimizeReason::kNoReason);
+        buffer->frame_state_descriptor, DeoptimizeKind::kEager,
+        DeoptimizeReason::kNoReason);
     buffer->instruction_args.push_back(g.TempImmediate(state_id));
 
     StateObjectDeduplicator deduplicator(instruction_zone());
@@ -796,20 +830,33 @@
   }
 }
 
+bool InstructionSelector::IsSourcePositionUsed(Node* node) {
+  return (source_position_mode_ == kAllSourcePositions ||
+          node->opcode() == IrOpcode::kCall ||
+          node->opcode() == IrOpcode::kTrapIf ||
+          node->opcode() == IrOpcode::kTrapUnless);
+}
+
 void InstructionSelector::VisitBlock(BasicBlock* block) {
   DCHECK(!current_block_);
   current_block_ = block;
-  int current_block_end = static_cast<int>(instructions_.size());
+  auto current_num_instructions = [&] {
+    DCHECK_GE(kMaxInt, instructions_.size());
+    return static_cast<int>(instructions_.size());
+  };
+  int current_block_end = current_num_instructions();
 
   int effect_level = 0;
   for (Node* const node : *block) {
+    SetEffectLevel(node, effect_level);
     if (node->opcode() == IrOpcode::kStore ||
         node->opcode() == IrOpcode::kUnalignedStore ||
         node->opcode() == IrOpcode::kCheckedStore ||
-        node->opcode() == IrOpcode::kCall) {
+        node->opcode() == IrOpcode::kCall ||
+        node->opcode() == IrOpcode::kProtectedLoad ||
+        node->opcode() == IrOpcode::kProtectedStore) {
       ++effect_level;
     }
-    SetEffectLevel(node, effect_level);
   }
 
   // We visit the control first, then the nodes in the block, so the block's
@@ -818,10 +865,25 @@
     SetEffectLevel(block->control_input(), effect_level);
   }
 
+  auto FinishEmittedInstructions = [&](Node* node, int instruction_start) {
+    if (instruction_selection_failed()) return false;
+    if (current_num_instructions() == instruction_start) return true;
+    std::reverse(instructions_.begin() + instruction_start,
+                 instructions_.end());
+    if (!node) return true;
+    SourcePosition source_position = source_positions_->GetSourcePosition(node);
+    if (source_position.IsKnown() && IsSourcePositionUsed(node)) {
+      sequence()->SetSourcePosition(instructions_[instruction_start],
+                                    source_position);
+    }
+    return true;
+  };
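  // A minimal sketch of the bookkeeping above, assuming VisitNode emitted
  // three instructions for one node. Matching walks the graph bottom-up, so
  // instructions land in reverse order and are flipped back in place:
  //
  //   before: instructions_ = [..., i2, i1, i0]
  //   after std::reverse(begin + instruction_start, end): [..., i0, i1, i2]
  //
  // The source position, if any, is then attached to
  // instructions_[instruction_start], the first instruction in final order.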
+
   // Generate code for the block control "top down", but schedule the code
   // "bottom up".
   VisitControl(block);
-  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+  if (!FinishEmittedInstructions(block->control_input(), current_block_end))
+    return;
 
   // Visit code in reverse control flow order, because architecture-specific
   // matching may cover more than one node at a time.
@@ -830,19 +892,9 @@
     if (!IsUsed(node) || IsDefined(node)) continue;
     // Generate code for this node "top down", but schedule the code "bottom
     // up".
-    size_t current_node_end = instructions_.size();
+    int current_node_end = current_num_instructions();
     VisitNode(node);
-    if (instruction_selection_failed()) return;
-    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
-    if (instructions_.size() == current_node_end) continue;
-    // Mark source position on first instruction emitted.
-    SourcePosition source_position = source_positions_->GetSourcePosition(node);
-    if (source_position.IsKnown() &&
-        (source_position_mode_ == kAllSourcePositions ||
-         node->opcode() == IrOpcode::kCall)) {
-      sequence()->SetSourcePosition(instructions_[current_node_end],
-                                    source_position);
-    }
+    if (!FinishEmittedInstructions(node, current_node_end)) return;
   }
 
   // We're done with the block.
@@ -862,6 +914,8 @@
   if (block->SuccessorCount() > 1) {
     for (BasicBlock* const successor : block->successors()) {
       for (Node* const node : *successor) {
+        // If this CHECK fails, you might have specified merged variables
+        // for a label with only one predecessor.
         CHECK(!IrOpcode::IsPhiOpcode(node->opcode()));
       }
     }
@@ -1013,6 +1067,12 @@
       return VisitDeoptimizeIf(node);
     case IrOpcode::kDeoptimizeUnless:
       return VisitDeoptimizeUnless(node);
+    case IrOpcode::kTrapIf:
+      return VisitTrapIf(node, static_cast<Runtime::FunctionId>(
+                                   OpParameter<int32_t>(node->op())));
+    case IrOpcode::kTrapUnless:
+      return VisitTrapUnless(node, static_cast<Runtime::FunctionId>(
+                                       OpParameter<int32_t>(node->op())));
     case IrOpcode::kFrameState:
     case IrOpcode::kStateValues:
     case IrOpcode::kObjectState:
@@ -1033,6 +1093,8 @@
     }
     case IrOpcode::kStore:
       return VisitStore(node);
+    case IrOpcode::kProtectedStore:
+      return VisitProtectedStore(node);
     case IrOpcode::kWord32And:
       return MarkAsWord32(node), VisitWord32And(node);
     case IrOpcode::kWord32Or:
@@ -1387,15 +1449,190 @@
     }
     case IrOpcode::kAtomicStore:
       return VisitAtomicStore(node);
-    case IrOpcode::kProtectedLoad:
+    case IrOpcode::kProtectedLoad: {
+      LoadRepresentation type = LoadRepresentationOf(node->op());
+      MarkAsRepresentation(type.representation(), node);
       return VisitProtectedLoad(node);
+    }
     case IrOpcode::kUnsafePointerAdd:
       MarkAsRepresentation(MachineType::PointerRepresentation(), node);
       return VisitUnsafePointerAdd(node);
+    case IrOpcode::kCreateFloat32x4:
+      return MarkAsSimd128(node), VisitCreateFloat32x4(node);
+    case IrOpcode::kFloat32x4ExtractLane:
+      return MarkAsFloat32(node), VisitFloat32x4ExtractLane(node);
+    case IrOpcode::kFloat32x4ReplaceLane:
+      return MarkAsSimd128(node), VisitFloat32x4ReplaceLane(node);
+    case IrOpcode::kFloat32x4FromInt32x4:
+      return MarkAsSimd128(node), VisitFloat32x4FromInt32x4(node);
+    case IrOpcode::kFloat32x4FromUint32x4:
+      return MarkAsSimd128(node), VisitFloat32x4FromUint32x4(node);
+    case IrOpcode::kFloat32x4Abs:
+      return MarkAsSimd128(node), VisitFloat32x4Abs(node);
+    case IrOpcode::kFloat32x4Neg:
+      return MarkAsSimd128(node), VisitFloat32x4Neg(node);
+    case IrOpcode::kFloat32x4Add:
+      return MarkAsSimd128(node), VisitFloat32x4Add(node);
+    case IrOpcode::kFloat32x4Sub:
+      return MarkAsSimd128(node), VisitFloat32x4Sub(node);
+    case IrOpcode::kFloat32x4Equal:
+      return MarkAsSimd1x4(node), VisitFloat32x4Equal(node);
+    case IrOpcode::kFloat32x4NotEqual:
+      return MarkAsSimd1x4(node), VisitFloat32x4NotEqual(node);
     case IrOpcode::kCreateInt32x4:
       return MarkAsSimd128(node), VisitCreateInt32x4(node);
     case IrOpcode::kInt32x4ExtractLane:
       return MarkAsWord32(node), VisitInt32x4ExtractLane(node);
+    case IrOpcode::kInt32x4ReplaceLane:
+      return MarkAsSimd128(node), VisitInt32x4ReplaceLane(node);
+    case IrOpcode::kInt32x4FromFloat32x4:
+      return MarkAsSimd128(node), VisitInt32x4FromFloat32x4(node);
+    case IrOpcode::kUint32x4FromFloat32x4:
+      return MarkAsSimd128(node), VisitUint32x4FromFloat32x4(node);
+    case IrOpcode::kInt32x4Neg:
+      return MarkAsSimd128(node), VisitInt32x4Neg(node);
+    case IrOpcode::kInt32x4ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt32x4ShiftLeftByScalar(node);
+    case IrOpcode::kInt32x4ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt32x4ShiftRightByScalar(node);
+    case IrOpcode::kInt32x4Add:
+      return MarkAsSimd128(node), VisitInt32x4Add(node);
+    case IrOpcode::kInt32x4Sub:
+      return MarkAsSimd128(node), VisitInt32x4Sub(node);
+    case IrOpcode::kInt32x4Mul:
+      return MarkAsSimd128(node), VisitInt32x4Mul(node);
+    case IrOpcode::kInt32x4Min:
+      return MarkAsSimd128(node), VisitInt32x4Min(node);
+    case IrOpcode::kInt32x4Max:
+      return MarkAsSimd128(node), VisitInt32x4Max(node);
+    case IrOpcode::kInt32x4Equal:
+      return MarkAsSimd1x4(node), VisitInt32x4Equal(node);
+    case IrOpcode::kInt32x4NotEqual:
+      return MarkAsSimd1x4(node), VisitInt32x4NotEqual(node);
+    case IrOpcode::kInt32x4GreaterThan:
+      return MarkAsSimd1x4(node), VisitInt32x4GreaterThan(node);
+    case IrOpcode::kInt32x4GreaterThanOrEqual:
+      return MarkAsSimd1x4(node), VisitInt32x4GreaterThanOrEqual(node);
+    case IrOpcode::kUint32x4ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint32x4ShiftRightByScalar(node);
+    case IrOpcode::kUint32x4Min:
+      return MarkAsSimd128(node), VisitUint32x4Min(node);
+    case IrOpcode::kUint32x4Max:
+      return MarkAsSimd128(node), VisitUint32x4Max(node);
+    case IrOpcode::kUint32x4GreaterThan:
+      return MarkAsSimd1x4(node), VisitUint32x4GreaterThan(node);
+    case IrOpcode::kUint32x4GreaterThanOrEqual:
+      return MarkAsSimd1x4(node), VisitUint32x4GreaterThanOrEqual(node);
+    case IrOpcode::kCreateInt16x8:
+      return MarkAsSimd128(node), VisitCreateInt16x8(node);
+    case IrOpcode::kInt16x8ExtractLane:
+      return MarkAsWord32(node), VisitInt16x8ExtractLane(node);
+    case IrOpcode::kInt16x8ReplaceLane:
+      return MarkAsSimd128(node), VisitInt16x8ReplaceLane(node);
+    case IrOpcode::kInt16x8Neg:
+      return MarkAsSimd128(node), VisitInt16x8Neg(node);
+    case IrOpcode::kInt16x8ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt16x8ShiftLeftByScalar(node);
+    case IrOpcode::kInt16x8ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt16x8ShiftRightByScalar(node);
+    case IrOpcode::kInt16x8Add:
+      return MarkAsSimd128(node), VisitInt16x8Add(node);
+    case IrOpcode::kInt16x8AddSaturate:
+      return MarkAsSimd128(node), VisitInt16x8AddSaturate(node);
+    case IrOpcode::kInt16x8Sub:
+      return MarkAsSimd128(node), VisitInt16x8Sub(node);
+    case IrOpcode::kInt16x8SubSaturate:
+      return MarkAsSimd128(node), VisitInt16x8SubSaturate(node);
+    case IrOpcode::kInt16x8Mul:
+      return MarkAsSimd128(node), VisitInt16x8Mul(node);
+    case IrOpcode::kInt16x8Min:
+      return MarkAsSimd128(node), VisitInt16x8Min(node);
+    case IrOpcode::kInt16x8Max:
+      return MarkAsSimd128(node), VisitInt16x8Max(node);
+    case IrOpcode::kInt16x8Equal:
+      return MarkAsSimd1x8(node), VisitInt16x8Equal(node);
+    case IrOpcode::kInt16x8NotEqual:
+      return MarkAsSimd1x8(node), VisitInt16x8NotEqual(node);
+    case IrOpcode::kInt16x8GreaterThan:
+      return MarkAsSimd1x8(node), VisitInt16x8GreaterThan(node);
+    case IrOpcode::kInt16x8GreaterThanOrEqual:
+      return MarkAsSimd1x8(node), VisitInt16x8GreaterThanOrEqual(node);
+    case IrOpcode::kUint16x8ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint16x8ShiftRightByScalar(node);
+    case IrOpcode::kUint16x8AddSaturate:
+      return MarkAsSimd128(node), VisitUint16x8AddSaturate(node);
+    case IrOpcode::kUint16x8SubSaturate:
+      return MarkAsSimd128(node), VisitUint16x8SubSaturate(node);
+    case IrOpcode::kUint16x8Min:
+      return MarkAsSimd128(node), VisitUint16x8Min(node);
+    case IrOpcode::kUint16x8Max:
+      return MarkAsSimd128(node), VisitUint16x8Max(node);
+    case IrOpcode::kUint16x8GreaterThan:
+      return MarkAsSimd1x8(node), VisitUint16x8GreaterThan(node);
+    case IrOpcode::kUint16x8GreaterThanOrEqual:
+      return MarkAsSimd1x8(node), VisitUint16x8GreaterThanOrEqual(node);
+    case IrOpcode::kCreateInt8x16:
+      return MarkAsSimd128(node), VisitCreateInt8x16(node);
+    case IrOpcode::kInt8x16ExtractLane:
+      return MarkAsWord32(node), VisitInt8x16ExtractLane(node);
+    case IrOpcode::kInt8x16ReplaceLane:
+      return MarkAsSimd128(node), VisitInt8x16ReplaceLane(node);
+    case IrOpcode::kInt8x16Neg:
+      return MarkAsSimd128(node), VisitInt8x16Neg(node);
+    case IrOpcode::kInt8x16ShiftLeftByScalar:
+      return MarkAsSimd128(node), VisitInt8x16ShiftLeftByScalar(node);
+    case IrOpcode::kInt8x16ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitInt8x16ShiftRightByScalar(node);
+    case IrOpcode::kInt8x16Add:
+      return MarkAsSimd128(node), VisitInt8x16Add(node);
+    case IrOpcode::kInt8x16AddSaturate:
+      return MarkAsSimd128(node), VisitInt8x16AddSaturate(node);
+    case IrOpcode::kInt8x16Sub:
+      return MarkAsSimd128(node), VisitInt8x16Sub(node);
+    case IrOpcode::kInt8x16SubSaturate:
+      return MarkAsSimd128(node), VisitInt8x16SubSaturate(node);
+    case IrOpcode::kInt8x16Mul:
+      return MarkAsSimd128(node), VisitInt8x16Mul(node);
+    case IrOpcode::kInt8x16Min:
+      return MarkAsSimd128(node), VisitInt8x16Min(node);
+    case IrOpcode::kInt8x16Max:
+      return MarkAsSimd128(node), VisitInt8x16Max(node);
+    case IrOpcode::kInt8x16Equal:
+      return MarkAsSimd1x16(node), VisitInt8x16Equal(node);
+    case IrOpcode::kInt8x16NotEqual:
+      return MarkAsSimd1x16(node), VisitInt8x16NotEqual(node);
+    case IrOpcode::kInt8x16GreaterThan:
+      return MarkAsSimd1x16(node), VisitInt8x16GreaterThan(node);
+    case IrOpcode::kInt8x16GreaterThanOrEqual:
+      return MarkAsSimd1x16(node), VisitInt8x16GreaterThanOrEqual(node);
+    case IrOpcode::kUint8x16ShiftRightByScalar:
+      return MarkAsSimd128(node), VisitUint8x16ShiftRightByScalar(node);
+    case IrOpcode::kUint8x16AddSaturate:
+      return MarkAsSimd128(node), VisitUint8x16AddSaturate(node);
+    case IrOpcode::kUint8x16SubSaturate:
+      return MarkAsSimd128(node), VisitUint8x16SubSaturate(node);
+    case IrOpcode::kUint8x16Min:
+      return MarkAsSimd128(node), VisitUint8x16Min(node);
+    case IrOpcode::kUint8x16Max:
+      return MarkAsSimd128(node), VisitUint8x16Max(node);
+    case IrOpcode::kUint8x16GreaterThan:
+      return MarkAsSimd1x16(node), VisitUint8x16GreaterThan(node);
+    case IrOpcode::kUint8x16GreaterThanOrEqual:
+      return MarkAsSimd1x16(node), VisitUint8x16GreaterThanOrEqual(node);
+    case IrOpcode::kSimd128And:
+      return MarkAsSimd128(node), VisitSimd128And(node);
+    case IrOpcode::kSimd128Or:
+      return MarkAsSimd128(node), VisitSimd128Or(node);
+    case IrOpcode::kSimd128Xor:
+      return MarkAsSimd128(node), VisitSimd128Xor(node);
+    case IrOpcode::kSimd128Not:
+      return MarkAsSimd128(node), VisitSimd128Not(node);
+    case IrOpcode::kSimd32x4Select:
+      return MarkAsSimd128(node), VisitSimd32x4Select(node);
+    case IrOpcode::kSimd16x8Select:
+      return MarkAsSimd128(node), VisitSimd16x8Select(node);
+    case IrOpcode::kSimd8x16Select:
+      return MarkAsSimd128(node), VisitSimd8x16Select(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -1538,7 +1775,7 @@
 }
 
 void InstructionSelector::VisitStackSlot(Node* node) {
-  int size = 1 << ElementSizeLog2Of(StackSlotRepresentationOf(node->op()));
+  int size = StackSlotSizeOf(node->op());
   int slot = frame_->AllocateSpillSlot(size);
   OperandGenerator g(this);
 
@@ -1547,8 +1784,7 @@
 }
 
 void InstructionSelector::VisitBitcastTaggedToWord(Node* node) {
-  OperandGenerator g(this);
-  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(node->InputAt(0)));
+  EmitIdentity(node);
 }
 
 void InstructionSelector::VisitBitcastWordToTagged(Node* node) {
@@ -1697,7 +1933,6 @@
   UNIMPLEMENTED();
 }
 
-
 void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
   UNIMPLEMENTED();
 }
@@ -1723,13 +1958,288 @@
 void InstructionSelector::VisitWord32PairSar(Node* node) { UNIMPLEMENTED(); }
 #endif  // V8_TARGET_ARCH_64_BIT
 
-#if !V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
 void InstructionSelector::VisitCreateInt32x4(Node* node) { UNIMPLEMENTED(); }
 
 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
   UNIMPLEMENTED();
 }
-#endif  // !V8_TARGET_ARCH_X64
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+#endif  // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM
+
+#if !V8_TARGET_ARCH_ARM
+void InstructionSelector::VisitCreateFloat32x4(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromInt32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4FromUint32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitFloat32x4Abs(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitFloat32x4NotEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4FromFloat32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4FromFloat32x4(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt32x4LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt32x4GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint32x4GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint32x4GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt16x8(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt16x8LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt16x8GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint16x8GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint16x8GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitCreateInt8x16(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ExtractLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ReplaceLane(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Neg(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16ShiftLeftByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Add(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Sub(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16Mul(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16Equal(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16NotEqual(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThan(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitInt8x16LessThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitInt8x16GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16ShiftRightByScalar(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16AddSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16SubSaturate(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16Max(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16Min(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitUint8x16GreaterThan(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitUint8x16GreaterThanOrEqual(Node* node) {
+  UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitSimd128And(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Or(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Xor(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd128Not(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd32x4Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd16x8Select(Node* node) { UNIMPLEMENTED(); }
+
+void InstructionSelector::VisitSimd8x16Select(Node* node) { UNIMPLEMENTED(); }
+#endif  // !V8_TARGET_ARCH_ARM
 
 void InstructionSelector::VisitFinishRegion(Node* node) { EmitIdentity(node); }
 
@@ -1970,7 +2480,8 @@
   DCHECK_GE(input_count, 1);
   auto value_locations = zone()->NewArray<InstructionOperand>(input_count);
   Node* pop_count = ret->InputAt(0);
-  value_locations[0] = pop_count->opcode() == IrOpcode::kInt32Constant
+  value_locations[0] = (pop_count->opcode() == IrOpcode::kInt32Constant ||
+                        pop_count->opcode() == IrOpcode::kInt64Constant)
                            ? g.UseImmediate(pop_count)
                            : g.UseRegister(pop_count);
   for (int i = 1; i < input_count; ++i) {
@@ -1980,32 +2491,31 @@
   Emit(kArchRet, 0, nullptr, input_count, value_locations);
 }
 
-Instruction* InstructionSelector::EmitDeoptimize(InstructionCode opcode,
-                                                 InstructionOperand output,
-                                                 InstructionOperand a,
-                                                 DeoptimizeReason reason,
-                                                 Node* frame_state) {
+Instruction* InstructionSelector::EmitDeoptimize(
+    InstructionCode opcode, InstructionOperand output, InstructionOperand a,
+    DeoptimizeKind kind, DeoptimizeReason reason, Node* frame_state) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
   InstructionOperand inputs[] = {a};
   size_t input_count = arraysize(inputs);
   return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
-                        reason, frame_state);
+                        kind, reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, InstructionOperand output, InstructionOperand a,
-    InstructionOperand b, DeoptimizeReason reason, Node* frame_state) {
+    InstructionOperand b, DeoptimizeKind kind, DeoptimizeReason reason,
+    Node* frame_state) {
   size_t output_count = output.IsInvalid() ? 0 : 1;
   InstructionOperand inputs[] = {a, b};
   size_t input_count = arraysize(inputs);
   return EmitDeoptimize(opcode, output_count, &output, input_count, inputs,
-                        reason, frame_state);
+                        kind, reason, frame_state);
 }
 
 Instruction* InstructionSelector::EmitDeoptimize(
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
-    size_t input_count, InstructionOperand* inputs, DeoptimizeReason reason,
-    Node* frame_state) {
+    size_t input_count, InstructionOperand* inputs, DeoptimizeKind kind,
+    DeoptimizeReason reason, Node* frame_state) {
   OperandGenerator g(this);
   FrameStateDescriptor* const descriptor = GetFrameStateDescriptor(frame_state);
   InstructionOperandVector args(instruction_zone());
@@ -2014,7 +2524,8 @@
     args.push_back(inputs[i]);
   }
   opcode |= MiscField::encode(static_cast<int>(input_count));
-  int const state_id = sequence()->AddDeoptimizationEntry(descriptor, reason);
+  int const state_id =
+      sequence()->AddDeoptimizationEntry(descriptor, kind, reason);
   args.push_back(g.TempImmediate(state_id));
   StateObjectDeduplicator deduplicator(instruction_zone());
   AddInputsToFrameStateDescriptor(descriptor, frame_state, &g, &deduplicator,
@@ -2033,16 +2544,7 @@
 void InstructionSelector::VisitDeoptimize(DeoptimizeKind kind,
                                           DeoptimizeReason reason,
                                           Node* value) {
-  InstructionCode opcode = kArchDeoptimize;
-  switch (kind) {
-    case DeoptimizeKind::kEager:
-      opcode |= MiscField::encode(Deoptimizer::EAGER);
-      break;
-    case DeoptimizeKind::kSoft:
-      opcode |= MiscField::encode(Deoptimizer::SOFT);
-      break;
-  }
-  EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, reason, value);
+  EmitDeoptimize(kArchDeoptimize, 0, nullptr, 0, nullptr, kind, reason, value);
 }
 
 
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
index 65ba8f7..d811aa4 100644
--- a/src/compiler/instruction-selector.h
+++ b/src/compiler/instruction-selector.h
@@ -26,6 +26,7 @@
 class Linkage;
 class OperandGenerator;
 struct SwitchInfo;
+class StateObjectDeduplicator;
 
 // This struct connects nodes of parameters which are going to be pushed on the
 // call stack with their parameter index in the call descriptor of the callee.
@@ -42,6 +43,8 @@
   MachineType type_;
 };
 
+enum class FrameStateInputKind { kAny, kStackSlot };
+
 // Instruction selection generates an InstructionSequence for a given Schedule.
 class V8_EXPORT_PRIVATE InstructionSelector final {
  public:
@@ -111,14 +114,15 @@
   // ===========================================================================
 
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
-                              InstructionOperand a, DeoptimizeReason reason,
-                              Node* frame_state);
+                              InstructionOperand a, DeoptimizeKind kind,
+                              DeoptimizeReason reason, Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, InstructionOperand output,
                               InstructionOperand a, InstructionOperand b,
-                              DeoptimizeReason reason, Node* frame_state);
+                              DeoptimizeKind kind, DeoptimizeReason reason,
+                              Node* frame_state);
   Instruction* EmitDeoptimize(InstructionCode opcode, size_t output_count,
                               InstructionOperand* outputs, size_t input_count,
-                              InstructionOperand* inputs,
+                              InstructionOperand* inputs, DeoptimizeKind kind,
                               DeoptimizeReason reason, Node* frame_state);
 
   // ===========================================================================
@@ -259,6 +263,27 @@
   void MarkAsSimd128(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kSimd128, node);
   }
+  void MarkAsSimd1x4(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x4, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
+  void MarkAsSimd1x8(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x8, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
+  void MarkAsSimd1x16(Node* node) {
+    if (kSimdMaskRegisters) {
+      MarkAsRepresentation(MachineRepresentation::kSimd1x16, node);
+    } else {
+      MarkAsSimd128(node);
+    }
+  }
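  // A minimal sketch of the fallback above: a 4-lane comparison such as
  // Float32x4Equal produces a boolean vector. On targets with dedicated SIMD
  // mask registers it is tracked as kSimd1x4; elsewhere it occupies a full
  // 128-bit vector register, so MarkAsSimd1x4 degenerates to MarkAsSimd128:
  //
  //   MarkAsSimd1x4(cmp);  // rep = kSimdMaskRegisters ? kSimd1x4 : kSimd128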
   void MarkAsReference(Node* node) {
     MarkAsRepresentation(MachineRepresentation::kTagged, node);
   }
@@ -286,6 +311,17 @@
   int GetTempsCountForTailCallFromJSFunction();
 
   FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+  size_t AddInputsToFrameStateDescriptor(FrameStateDescriptor* descriptor,
+                                         Node* state, OperandGenerator* g,
+                                         StateObjectDeduplicator* deduplicator,
+                                         InstructionOperandVector* inputs,
+                                         FrameStateInputKind kind, Zone* zone);
+  size_t AddOperandToStateValueDescriptor(StateValueList* values,
+                                          InstructionOperandVector* inputs,
+                                          OperandGenerator* g,
+                                          StateObjectDeduplicator* deduplicator,
+                                          Node* input, MachineType type,
+                                          FrameStateInputKind kind, Zone* zone);
 
   // ===========================================================================
   // ============= Architecture-specific graph covering methods. ===============
@@ -307,8 +343,7 @@
 
 #define DECLARE_GENERATOR(x) void Visit##x(Node* node);
   MACHINE_OP_LIST(DECLARE_GENERATOR)
-  MACHINE_SIMD_RETURN_NUM_OP_LIST(DECLARE_GENERATOR)
-  MACHINE_SIMD_RETURN_SIMD_OP_LIST(DECLARE_GENERATOR)
+  MACHINE_SIMD_OP_LIST(DECLARE_GENERATOR)
 #undef DECLARE_GENERATOR
 
   void VisitFinishRegion(Node* node);
@@ -321,6 +356,8 @@
   void VisitCall(Node* call, BasicBlock* handler = nullptr);
   void VisitDeoptimizeIf(Node* node);
   void VisitDeoptimizeUnless(Node* node);
+  void VisitTrapIf(Node* node, Runtime::FunctionId func_id);
+  void VisitTrapUnless(Node* node, Runtime::FunctionId func_id);
   void VisitTailCall(Node* call);
   void VisitGoto(BasicBlock* target);
   void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
@@ -351,6 +388,7 @@
   bool instruction_selection_failed() { return instruction_selection_failed_; }
 
   void MarkPairProjectionsAsWord32(Node* node);
+  bool IsSourcePositionUsed(Node* node);
 
   // ===========================================================================
 
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
index 3b2311a..1067d20 100644
--- a/src/compiler/instruction.cc
+++ b/src/compiler/instruction.cc
@@ -2,11 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/instruction.h"
+
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/instruction.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/source-position.h"
 
 namespace v8 {
 namespace internal {
@@ -208,6 +210,15 @@
         case MachineRepresentation::kSimd128:
           os << "|s128";
           break;
+        case MachineRepresentation::kSimd1x4:
+          os << "|s1x4";
+          break;
+        case MachineRepresentation::kSimd1x8:
+          os << "|s1x8";
+          break;
+        case MachineRepresentation::kSimd1x16:
+          os << "|s1x16";
+          break;
         case MachineRepresentation::kTaggedSigned:
           os << "|ts";
           break;
@@ -433,6 +444,8 @@
       return os << "deoptimize";
     case kFlags_set:
       return os << "set";
+    case kFlags_trap:
+      return os << "trap";
   }
   UNREACHABLE();
   return os;
@@ -886,6 +899,9 @@
     case MachineRepresentation::kFloat32:
     case MachineRepresentation::kFloat64:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kTaggedSigned:
     case MachineRepresentation::kTaggedPointer:
     case MachineRepresentation::kTagged:
@@ -924,9 +940,11 @@
 }
 
 int InstructionSequence::AddDeoptimizationEntry(
-    FrameStateDescriptor* descriptor, DeoptimizeReason reason) {
+    FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+    DeoptimizeReason reason) {
   int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
-  deoptimization_entries_.push_back(DeoptimizationEntry(descriptor, reason));
+  deoptimization_entries_.push_back(
+      DeoptimizationEntry(descriptor, kind, reason));
   return deoptimization_id;
 }
 
@@ -985,8 +1003,18 @@
 }
 
 const RegisterConfiguration*
-InstructionSequence::GetRegisterConfigurationForTesting() {
-  return GetRegConfig();
+    InstructionSequence::registerConfigurationForTesting_ = nullptr;
+
+const RegisterConfiguration*
+InstructionSequence::RegisterConfigurationForTesting() {
+  DCHECK(registerConfigurationForTesting_ != nullptr);
+  return registerConfigurationForTesting_;
+}
+
+void InstructionSequence::SetRegisterConfigurationForTesting(
+    const RegisterConfiguration* regConfig) {
+  registerConfigurationForTesting_ = regConfig;
+  GetRegConfig = InstructionSequence::RegisterConfigurationForTesting;
 }
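// A minimal sketch of the testing hook, assuming a test owns a suitable
// RegisterConfiguration instance named test_config:
//
//   InstructionSequence::SetRegisterConfigurationForTesting(&test_config);
//   // From here on, GetRegConfig() resolves to &test_config.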
 
 FrameStateDescriptor::FrameStateDescriptor(
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index 327c8c1..ee7865d 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -484,6 +484,9 @@
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
       case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kSimd1x4:
+      case MachineRepresentation::kSimd1x8:
+      case MachineRepresentation::kSimd1x16:
       case MachineRepresentation::kTaggedSigned:
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTagged:
@@ -1065,16 +1068,33 @@
   }
 
   float ToFloat32() const {
+    // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as a float can
+    // cause the signalling bit to flip, and value_ is returned as a quiet
+    // NaN.
     DCHECK_EQ(kFloat32, type());
     return bit_cast<float>(static_cast<int32_t>(value_));
   }
 
+  uint32_t ToFloat32AsInt() const {
+    DCHECK_EQ(kFloat32, type());
+    return bit_cast<uint32_t>(static_cast<int32_t>(value_));
+  }
+
   double ToFloat64() const {
+    // TODO(ahaas): We should remove this function. If value_ has the bit
+    // representation of a signalling NaN, then returning it as a double can
+    // cause the signalling bit to flip, and value_ is returned as a quiet
+    // NaN.
     if (type() == kInt32) return ToInt32();
     DCHECK_EQ(kFloat64, type());
     return bit_cast<double>(value_);
   }
 
+  uint64_t ToFloat64AsInt() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kFloat64, type());
+    return bit_cast<uint64_t>(value_);
+  }
+
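  // A minimal sketch of why the *AsInt accessors exist, assuming a constant
  // holding the bits of a signalling NaN: the float/double path may quieten
  // the NaN, while the integer view preserves the exact bit pattern.
  //
  //   uint64_t bits = constant.ToFloat64AsInt();  // sNaN payload preserved
  //   double value  = constant.ToFloat64();       // quiet bit may flip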
   ExternalReference ToExternalReference() const {
     DCHECK_EQ(kExternalReference, type());
     return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
@@ -1104,52 +1124,132 @@
 // Forward declarations.
 class FrameStateDescriptor;
 
-
-enum class StateValueKind { kPlain, kNested, kDuplicate };
-
+enum class StateValueKind : uint8_t {
+  kArguments,
+  kPlain,
+  kOptimizedOut,
+  kNested,
+  kDuplicate
+};
 
 class StateValueDescriptor {
  public:
-  explicit StateValueDescriptor(Zone* zone)
+  StateValueDescriptor()
       : kind_(StateValueKind::kPlain),
         type_(MachineType::AnyTagged()),
-        id_(0),
-        fields_(zone) {}
+        id_(0) {}
 
-  static StateValueDescriptor Plain(Zone* zone, MachineType type) {
-    return StateValueDescriptor(StateValueKind::kPlain, zone, type, 0);
+  static StateValueDescriptor Arguments() {
+    return StateValueDescriptor(StateValueKind::kArguments,
+                                MachineType::AnyTagged(), 0);
   }
-  static StateValueDescriptor Recursive(Zone* zone, size_t id) {
-    return StateValueDescriptor(StateValueKind::kNested, zone,
+  static StateValueDescriptor Plain(MachineType type) {
+    return StateValueDescriptor(StateValueKind::kPlain, type, 0);
+  }
+  static StateValueDescriptor OptimizedOut() {
+    return StateValueDescriptor(StateValueKind::kOptimizedOut,
+                                MachineType::AnyTagged(), 0);
+  }
+  static StateValueDescriptor Recursive(size_t id) {
+    return StateValueDescriptor(StateValueKind::kNested,
                                 MachineType::AnyTagged(), id);
   }
-  static StateValueDescriptor Duplicate(Zone* zone, size_t id) {
-    return StateValueDescriptor(StateValueKind::kDuplicate, zone,
+  static StateValueDescriptor Duplicate(size_t id) {
+    return StateValueDescriptor(StateValueKind::kDuplicate,
                                 MachineType::AnyTagged(), id);
   }
 
-  size_t size() { return fields_.size(); }
-  ZoneVector<StateValueDescriptor>& fields() { return fields_; }
-  int IsPlain() { return kind_ == StateValueKind::kPlain; }
-  int IsNested() { return kind_ == StateValueKind::kNested; }
-  int IsDuplicate() { return kind_ == StateValueKind::kDuplicate; }
+  bool IsArguments() const { return kind_ == StateValueKind::kArguments; }
+  bool IsPlain() const { return kind_ == StateValueKind::kPlain; }
+  bool IsOptimizedOut() const { return kind_ == StateValueKind::kOptimizedOut; }
+  bool IsNested() const { return kind_ == StateValueKind::kNested; }
+  bool IsDuplicate() const { return kind_ == StateValueKind::kDuplicate; }
   MachineType type() const { return type_; }
-  MachineType GetOperandType(size_t index) const {
-    return fields_[index].type_;
-  }
   size_t id() const { return id_; }
 
  private:
-  StateValueDescriptor(StateValueKind kind, Zone* zone, MachineType type,
-                       size_t id)
-      : kind_(kind), type_(type), id_(id), fields_(zone) {}
+  StateValueDescriptor(StateValueKind kind, MachineType type, size_t id)
+      : kind_(kind), type_(type), id_(id) {}
 
   StateValueKind kind_;
   MachineType type_;
   size_t id_;
-  ZoneVector<StateValueDescriptor> fields_;
 };
 
+class StateValueList {
+ public:
+  explicit StateValueList(Zone* zone) : fields_(zone), nested_(zone) {}
+
+  size_t size() { return fields_.size(); }
+
+  struct Value {
+    StateValueDescriptor* desc;
+    StateValueList* nested;
+
+    Value(StateValueDescriptor* desc, StateValueList* nested)
+        : desc(desc), nested(nested) {}
+  };
+
+  class iterator {
+   public:
+    // Bare minimum of operators needed for range iteration.
+    bool operator!=(const iterator& other) const {
+      return field_iterator != other.field_iterator;
+    }
+    bool operator==(const iterator& other) const {
+      return field_iterator == other.field_iterator;
+    }
+    iterator& operator++() {
+      if (field_iterator->IsNested()) {
+        nested_iterator++;
+      }
+      ++field_iterator;
+      return *this;
+    }
+    Value operator*() {
+      StateValueDescriptor* desc = &(*field_iterator);
+      StateValueList* nested = desc->IsNested() ? *nested_iterator : nullptr;
+      return Value(desc, nested);
+    }
+
+   private:
+    friend class StateValueList;
+
+    iterator(ZoneVector<StateValueDescriptor>::iterator it,
+             ZoneVector<StateValueList*>::iterator nested)
+        : field_iterator(it), nested_iterator(nested) {}
+
+    ZoneVector<StateValueDescriptor>::iterator field_iterator;
+    ZoneVector<StateValueList*>::iterator nested_iterator;
+  };
+
+  void ReserveSize(size_t size) { fields_.reserve(size); }
+
+  StateValueList* PushRecursiveField(Zone* zone, size_t id) {
+    fields_.push_back(StateValueDescriptor::Recursive(id));
+    StateValueList* nested =
+        new (zone->New(sizeof(StateValueList))) StateValueList(zone);
+    nested_.push_back(nested);
+    return nested;
+  }
+  void PushArguments() { fields_.push_back(StateValueDescriptor::Arguments()); }
+  void PushDuplicate(size_t id) {
+    fields_.push_back(StateValueDescriptor::Duplicate(id));
+  }
+  void PushPlain(MachineType type) {
+    fields_.push_back(StateValueDescriptor::Plain(type));
+  }
+  void PushOptimizedOut() {
+    fields_.push_back(StateValueDescriptor::OptimizedOut());
+  }
+
+  iterator begin() { return iterator(fields_.begin(), nested_.begin()); }
+  iterator end() { return iterator(fields_.end(), nested_.end()); }
+
+ private:
+  ZoneVector<StateValueDescriptor> fields_;
+  ZoneVector<StateValueList*> nested_;
+};
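// Hedged usage sketch for the new StateValueList (the visitor below is
// hypothetical, not part of V8): range-iterate over the descriptors and
// descend into nested lists, which the iterator yields only for kNested
// entries.
void VisitStateValues(StateValueList* values) {
  for (StateValueList::Value entry : *values) {
    if (entry.desc->IsNested()) {
      VisitStateValues(entry.nested);  // recurse into the nested frame state
    } else if (entry.desc->IsPlain()) {
      MachineType type = entry.desc->type();  // machine type of this slot
      (void)type;
    }
  }
}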
 
 class FrameStateDescriptor : public ZoneObject {
  public:
@@ -1178,10 +1278,7 @@
   size_t GetFrameCount() const;
   size_t GetJSFrameCount() const;
 
-  MachineType GetType(size_t index) const {
-    return values_.GetOperandType(index);
-  }
-  StateValueDescriptor* GetStateValueDescriptor() { return &values_; }
+  StateValueList* GetStateValueDescriptors() { return &values_; }
 
   static const int kImpossibleValue = 0xdead;
 
@@ -1192,7 +1289,7 @@
   size_t parameters_count_;
   size_t locals_count_;
   size_t stack_count_;
-  StateValueDescriptor values_;
+  StateValueList values_;
   MaybeHandle<SharedFunctionInfo> const shared_info_;
   FrameStateDescriptor* outer_state_;
 };
@@ -1202,14 +1299,17 @@
 class DeoptimizationEntry final {
  public:
   DeoptimizationEntry() {}
-  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeReason reason)
-      : descriptor_(descriptor), reason_(reason) {}
+  DeoptimizationEntry(FrameStateDescriptor* descriptor, DeoptimizeKind kind,
+                      DeoptimizeReason reason)
+      : descriptor_(descriptor), kind_(kind), reason_(reason) {}
 
   FrameStateDescriptor* descriptor() const { return descriptor_; }
+  DeoptimizeKind kind() const { return kind_; }
   DeoptimizeReason reason() const { return reason_; }
 
  private:
   FrameStateDescriptor* descriptor_ = nullptr;
+  DeoptimizeKind kind_ = DeoptimizeKind::kEager;
   DeoptimizeReason reason_ = DeoptimizeReason::kNoReason;
 };
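// Hedged usage sketch (the call site below is hypothetical): entries now
// record how to deoptimize in addition to why, so registering one reads as
//
//   int deopt_id = sequence->AddDeoptimizationEntry(
//       frame_state_descriptor, DeoptimizeKind::kEager,
//       DeoptimizeReason::kNoReason);
//
// Default-constructed entries keep DeoptimizeKind::kEager.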
 
@@ -1469,7 +1569,7 @@
   }
 
   int AddDeoptimizationEntry(FrameStateDescriptor* descriptor,
-                             DeoptimizeReason reason);
+                             DeoptimizeKind kind, DeoptimizeReason reason);
   DeoptimizationEntry const& GetDeoptimizationEntry(int deoptimization_id);
   int GetDeoptimizationEntryCount() const {
     return static_cast<int>(deoptimization_entries_.size());
@@ -1500,7 +1600,9 @@
   void ValidateDeferredBlockEntryPaths() const;
   void ValidateSSA() const;
 
-  const RegisterConfiguration* GetRegisterConfigurationForTesting();
+  static void SetRegisterConfigurationForTesting(
+      const RegisterConfiguration* regConfig);
+  static void ClearRegisterConfigurationForTesting();
 
  private:
   friend V8_EXPORT_PRIVATE std::ostream& operator<<(
@@ -1508,6 +1610,9 @@
 
   typedef ZoneMap<const Instruction*, SourcePosition> SourcePositionMap;
 
+  static const RegisterConfiguration* RegisterConfigurationForTesting();
+  static const RegisterConfiguration* registerConfigurationForTesting_;
+
   Isolate* isolate_;
   Zone* const zone_;
   InstructionBlocks* const instruction_blocks_;
diff --git a/src/compiler/int64-lowering.cc b/src/compiler/int64-lowering.cc
index 62523ca..06c9272 100644
--- a/src/compiler/int64-lowering.cc
+++ b/src/compiler/int64-lowering.cc
@@ -12,6 +12,7 @@
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
+#include "src/objects-inl.h"
 #include "src/wasm/wasm-module.h"
 #include "src/zone/zone.h"
 
@@ -61,7 +62,8 @@
           // that they are processed after all other nodes.
           PreparePhiReplacement(input);
           stack_.push_front({input, 0});
-        } else if (input->opcode() == IrOpcode::kEffectPhi) {
+        } else if (input->opcode() == IrOpcode::kEffectPhi ||
+                   input->opcode() == IrOpcode::kLoop) {
           stack_.push_front({input, 0});
         } else {
           stack_.push_back({input, 0});
@@ -104,6 +106,9 @@
 
 void Int64Lowering::GetIndexNodes(Node* index, Node*& index_low,
                                   Node*& index_high) {
+  if (HasReplacementLow(index)) {
+    index = GetReplacementLow(index);
+  }
 #if defined(V8_TARGET_LITTLE_ENDIAN)
   index_low = index;
   index_high = graph()->NewNode(machine()->Int32Add(), index,
@@ -233,9 +238,7 @@
         NodeProperties::ChangeOp(node, store_op);
         ReplaceNode(node, node, high_node);
       } else {
-        if (HasReplacementLow(node->InputAt(2))) {
-          node->ReplaceInput(2, GetReplacementLow(node->InputAt(2)));
-        }
+        DefaultLowering(node, true);
       }
       break;
     }
@@ -824,7 +827,7 @@
   ReplaceNode(node, replacement, nullptr);
 }
 
-bool Int64Lowering::DefaultLowering(Node* node) {
+bool Int64Lowering::DefaultLowering(Node* node, bool low_word_only) {
   bool something_changed = false;
   for (int i = NodeProperties::PastValueIndex(node) - 1; i >= 0; i--) {
     Node* input = node->InputAt(i);
@@ -832,7 +835,7 @@
       something_changed = true;
       node->ReplaceInput(i, GetReplacementLow(input));
     }
-    if (HasReplacementHigh(input)) {
+    if (!low_word_only && HasReplacementHigh(input)) {
       something_changed = true;
       node->InsertInput(zone(), i + 1, GetReplacementHigh(input));
     }
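// Self-contained sketch (illustrative, not V8 code) of the word split that
// Int64Lowering performs on 32-bit targets: every int64 value becomes a
// (low, high) pair of 32-bit words, and DefaultLowering(node, true) rewires
// a consumer to use only the low word.
#include <cstdint>
#include <utility>

std::pair<uint32_t, uint32_t> SplitInt64(uint64_t value) {
  return {static_cast<uint32_t>(value),         // low word, bits 0..31
          static_cast<uint32_t>(value >> 32)};  // high word, bits 32..63
}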
diff --git a/src/compiler/int64-lowering.h b/src/compiler/int64-lowering.h
index 66a54e9..811c2b2 100644
--- a/src/compiler/int64-lowering.h
+++ b/src/compiler/int64-lowering.h
@@ -47,7 +47,7 @@
   void PrepareReplacements(Node* node);
   void PushNode(Node* node);
   void LowerNode(Node* node);
-  bool DefaultLowering(Node* node);
+  bool DefaultLowering(Node* node, bool low_word_only = false);
   void LowerComparison(Node* node, const Operator* signed_op,
                        const Operator* unsigned_op);
   void PrepareProjectionReplacements(Node* node);
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 2962e24..24eb5ce 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -4,9 +4,12 @@
 
 #include "src/compiler/js-builtin-reducer.h"
 
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/compilation-dependencies.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
@@ -18,17 +21,16 @@
 namespace internal {
 namespace compiler {
 
-
-// Helper class to access JSCallFunction nodes that are potential candidates
+// Helper class to access JSCall nodes that are potential candidates
 // for reduction when they have a BuiltinFunctionId associated with them.
 class JSCallReduction {
  public:
   explicit JSCallReduction(Node* node) : node_(node) {}
 
-  // Determines whether the node is a JSCallFunction operation that targets a
+  // Determines whether the node is a JSCall operation that targets a
   // constant callee being a well-known builtin with a BuiltinFunctionId.
   bool HasBuiltinFunctionId() {
-    if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+    if (node_->opcode() != IrOpcode::kJSCall) return false;
     HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
     if (!m.HasValue() || !m.Value()->IsJSFunction()) return false;
     Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
@@ -37,7 +39,7 @@
 
   // Retrieves the BuiltinFunctionId as described above.
   BuiltinFunctionId GetBuiltinFunctionId() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     HeapObjectMatcher m(NodeProperties::GetValueInput(node_, 0));
     Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
     return function->shared()->builtin_function_id();
@@ -78,13 +80,13 @@
   Node* right() { return GetJSCallInput(1); }
 
   int GetJSCallArity() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
     return node_->op()->ValueInputCount() - 2;
   }
 
   Node* GetJSCallInput(int index) {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, node_->opcode());
     DCHECK_LT(index, GetJSCallArity());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
     return NodeProperties::GetValueInput(node_, index + 2);
@@ -107,39 +109,14 @@
 
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
-  }
-  return false;
-}
-
 MaybeHandle<Map> GetMapWitness(Node* node) {
+  ZoneHandleSet<Map> maps;
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
-  // Check if the {node} is dominated by a CheckMaps with a single map
-  // for the {receiver}, and if so use that map for the lowering below.
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      if (dominator->op()->ValueInputCount() == 2) {
-        HeapObjectMatcher m(dominator->InputAt(1));
-        if (m.HasValue()) return Handle<Map>::cast(m.Value());
-      }
-      return MaybeHandle<Map>();
-    }
-    if (dominator->op()->EffectInputCount() != 1) {
-      // Didn't find any appropriate CheckMaps node.
-      return MaybeHandle<Map>();
-    }
-    dominator = NodeProperties::GetEffectInput(dominator);
+  if (NodeProperties::InferReceiverMaps(receiver, effect, &maps)) {
+    if (maps.size() == 1) return MaybeHandle<Map>(maps[0]);
   }
+  return MaybeHandle<Map>();
 }
 
 // TODO(turbofan): This was copied from Crankshaft, might be too restrictive.
@@ -235,17 +212,27 @@
   Node* control = NodeProperties::GetControlInput(node);
 
   if (iter_kind == ArrayIteratorKind::kTypedArray) {
-    // For JSTypedArray iterator methods, deopt if the buffer is neutered. This
-    // is potentially a deopt loop, but should be extremely unlikely.
-    DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
-    Node* buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // For JSTypedArray iterator methods, deopt if the buffer is neutered.
+      // This is potentially a deopt loop, but should be extremely unlikely.
+      DCHECK_EQ(JS_TYPED_ARRAY_TYPE, receiver_map->instance_type());
+      Node* buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
 
-    Node* check = effect = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
-    check = graph()->NewNode(simplified()->BooleanNot(), check);
-    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+      // Deoptimize if the {buffer} has been neutered.
+      Node* check = effect = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+      check = graph()->NewNode(simplified()->BooleanNot(), check);
+      effect =
+          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+    }
   }
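// Hedged distillation of the protector-cell pattern used above (the helper
// name is hypothetical): while the global invariant holds, record a code
// dependency instead of emitting a per-call check; the optimized code is
// discarded if the protector cell is ever invalidated.
bool TryElideNeuteringCheck(Isolate* isolate, CompilationDependencies* deps,
                            Factory* factory) {
  if (isolate->IsArrayBufferNeuteringIntact()) {
    deps->AssumePropertyCell(factory->array_buffer_neutering_protector());
    return true;  // elided; a deopt fires if any ArrayBuffer gets neutered
  }
  return false;  // must emit the explicit ArrayBufferWasNeutered check
}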
 
   int map_index = -1;
@@ -310,6 +297,7 @@
   Node* value = effect = graph()->NewNode(
       simplified()->Allocate(NOT_TENURED),
       jsgraph()->Constant(JSArrayIterator::kSize), effect, control);
+  NodeProperties::SetType(value, Type::OtherObject());
   effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
                             value, jsgraph()->Constant(map), effect, control);
   effect = graph()->NewNode(
@@ -403,12 +391,17 @@
       } else {
         // For value/entry iteration, first step is a mapcheck to ensure
         // inlining is still valid.
+        Node* array_map = etrue1 =
+            graph()->NewNode(simplified()->LoadField(AccessBuilder::ForMap()),
+                             array, etrue1, if_true1);
         Node* orig_map = etrue1 =
             graph()->NewNode(simplified()->LoadField(
                                  AccessBuilder::ForJSArrayIteratorObjectMap()),
                              iterator, etrue1, if_true1);
-        etrue1 = graph()->NewNode(simplified()->CheckMaps(1), array, orig_map,
-                                  etrue1, if_true1);
+        Node* check_map = graph()->NewNode(simplified()->ReferenceEqual(),
+                                           array_map, orig_map);
+        etrue1 = graph()->NewNode(simplified()->CheckIf(), check_map, etrue1,
+                                  if_true1);
       }
 
       if (kind != IterationKind::kKeys) {
@@ -536,11 +529,20 @@
         simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
         array, efalse0, if_false0);
 
-    Node* check1 = efalse0 = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
-    check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
-    efalse0 =
-        graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Deoptimize if the array buffer was neutered.
+      Node* check1 = efalse0 = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, efalse0, if_false0);
+      check1 = graph()->NewNode(simplified()->BooleanNot(), check1);
+      efalse0 =
+          graph()->NewNode(simplified()->CheckIf(), check1, efalse0, if_false0);
+    }
 
     Node* length = efalse0 = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()), array,
@@ -813,20 +815,42 @@
 
 // ES6 section 22.1.3.18 Array.prototype.push ( )
 Reduction JSBuiltinReducer::ReduceArrayPush(Node* node) {
-  Handle<Map> receiver_map;
   // We need exactly target, receiver and value parameters.
   if (node->op()->ValueInputCount() != 3) return NoChange();
   Node* receiver = NodeProperties::GetValueInput(node, 1);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
   Node* value = NodeProperties::GetValueInput(node, 2);
-  if (GetMapWitness(node).ToHandle(&receiver_map) &&
-      CanInlineArrayResizeOperation(receiver_map)) {
+  ZoneHandleSet<Map> receiver_maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+  if (receiver_maps.size() != 1) return NoChange();
+  DCHECK_NE(NodeProperties::kNoReceiverMaps, result);
+
+  // TODO(turbofan): Relax this to deal with multiple {receiver} maps.
+  Handle<Map> receiver_map = receiver_maps[0];
+  if (CanInlineArrayResizeOperation(receiver_map)) {
     // Install code dependencies on the {receiver} prototype maps and the
     // global array protector cell.
     dependencies()->AssumePropertyCell(factory()->array_protector());
     dependencies()->AssumePrototypeMapsStable(receiver_map);
 
+    // If the {receiver_maps} information is not reliable, we need
+    // to check that the {receiver} still has one of these maps.
+    if (result == NodeProperties::kUnreliableReceiverMaps) {
+      if (receiver_map->is_stable()) {
+        dependencies()->AssumeMapStable(receiver_map);
+      } else {
+        // TODO(turbofan): This is a potential - yet unlikely - deoptimization
+        // loop, since we might not learn from this deoptimization in baseline
+        // code. We need a way to learn from deoptimizations in optimized
+        // code to address these problems.
+        effect = graph()->NewNode(
+            simplified()->CheckMaps(CheckMapsFlag::kNone, receiver_maps),
+            receiver, effect, control);
+      }
+    }
+
     // TODO(turbofan): Perform type checks on the {value}. We are not guaranteed
     // to learn from these checks in case they fail, as the witness (i.e. the
     // map check from the LoadIC for a.push) might not be executed in baseline
@@ -890,39 +914,24 @@
                             InstanceType instance_type) {
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
+        NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
+      ZoneHandleSet<Map> const& maps =
+          CheckMapsParametersOf(dominator->op()).maps();
       // Check if all maps have the given {instance_type}.
-      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
-        Node* const map = NodeProperties::GetValueInput(dominator, i);
-        Type* const map_type = NodeProperties::GetType(map);
-        if (!map_type->IsHeapConstant()) return false;
-        Handle<Map> const map_value =
-            Handle<Map>::cast(map_type->AsHeapConstant()->Value());
-        if (map_value->instance_type() != instance_type) return false;
+      for (size_t i = 0; i < maps.size(); ++i) {
+        if (maps[i]->instance_type() != instance_type) return false;
       }
       return true;
     }
-    switch (dominator->opcode()) {
-      case IrOpcode::kStoreField: {
-        FieldAccess const& access = FieldAccessOf(dominator->op());
-        if (access.base_is_tagged == kTaggedBase &&
-            access.offset == HeapObject::kMapOffset) {
-          return false;
-        }
-        break;
-      }
-      case IrOpcode::kStoreElement:
-      case IrOpcode::kStoreTypedElement:
-        break;
-      default: {
-        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
-        if (dominator->op()->EffectInputCount() != 1 ||
-            !dominator->op()->HasProperty(Operator::kNoWrite)) {
-          // Didn't find any appropriate CheckMaps node.
-          return false;
-        }
-        break;
-      }
+    // The instance type doesn't change for JSReceiver values, so we
+    // don't need to pay attention to potentially side-effecting nodes
+    // here. Strings and internal structures like FixedArray and
+    // FixedDoubleArray behave differently, but this function is never
+    // used for those.
+    DCHECK_LE(FIRST_JS_RECEIVER_TYPE, instance_type);
+    DCHECK_EQ(1, dominator->op()->EffectOutputCount());
+    if (dominator->op()->EffectInputCount() != 1) {
+      // Didn't find any appropriate CheckMaps node.
+      return false;
     }
     dominator = NodeProperties::GetEffectInput(dominator);
   }
@@ -930,6 +939,14 @@
 
 }  // namespace
 
+// ES6 section 20.3.3.1 Date.now ( )
+Reduction JSBuiltinReducer::ReduceDateNow(Node* node) {
+  NodeProperties::RemoveValueInputs(node);
+  NodeProperties::ChangeOp(
+      node, javascript()->CallRuntime(Runtime::kDateCurrentTime));
+  return Changed(node);
+}
+
 // ES6 section 20.3.4.10 Date.prototype.getTime ( )
 Reduction JSBuiltinReducer::ReduceDateGetTime(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 1);
@@ -945,34 +962,6 @@
   return NoChange();
 }
 
-// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] ( V )
-Reduction JSBuiltinReducer::ReduceFunctionHasInstance(Node* node) {
-  Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* object = (node->op()->ValueInputCount() >= 3)
-                     ? NodeProperties::GetValueInput(node, 2)
-                     : jsgraph()->UndefinedConstant();
-  Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
-  // stack trace doesn't contain the @@hasInstance call; we have the
-  // corresponding bug in the baseline case. Some massaging of the frame
-  // state would be necessary here.
-
-  // Morph this {node} into a JSOrdinaryHasInstance node.
-  node->ReplaceInput(0, receiver);
-  node->ReplaceInput(1, object);
-  node->ReplaceInput(2, context);
-  node->ReplaceInput(3, frame_state);
-  node->ReplaceInput(4, effect);
-  node->ReplaceInput(5, control);
-  node->TrimInputCount(6);
-  NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
-  return Changed(node);
-}
-
 // ES6 section 18.2.2 isFinite ( number )
 Reduction JSBuiltinReducer::ReduceGlobalIsFinite(Node* node) {
   JSCallReduction r(node);
@@ -1485,6 +1474,117 @@
   return NoChange();
 }
 
+// ES6 section #sec-object.create Object.create(proto, properties)
+Reduction JSBuiltinReducer::ReduceObjectCreate(Node* node) {
+  // We only handle the case of exactly target, receiver and the prototype
+  // parameter, i.e. Object.create(proto) without a properties argument.
+  int arg_count = node->op()->ValueInputCount();
+  if (arg_count != 3) return NoChange();
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  Node* prototype = NodeProperties::GetValueInput(node, 2);
+  Type* prototype_type = NodeProperties::GetType(prototype);
+  Handle<Map> instance_map;
+  if (!prototype_type->IsHeapConstant()) return NoChange();
+  Handle<HeapObject> prototype_const =
+      prototype_type->AsHeapConstant()->Value();
+  if (!prototype_const->IsNull(isolate()) && !prototype_const->IsJSReceiver()) {
+    return NoChange();
+  }
+  instance_map = Map::GetObjectCreateMap(prototype_const);
+  Node* properties = jsgraph()->EmptyFixedArrayConstant();
+  if (instance_map->is_dictionary_map()) {
+    // Allocate an empty NameDictionary as backing store for the properties.
+    Handle<Map> map(isolate()->heap()->hash_table_map(), isolate());
+    int capacity =
+        NameDictionary::ComputeCapacity(NameDictionary::kInitialCapacity);
+    DCHECK(base::bits::IsPowerOfTwo32(capacity));
+    int length = NameDictionary::EntryToIndex(capacity);
+    int size = NameDictionary::SizeFor(length);
+
+    effect = graph()->NewNode(
+        common()->BeginRegion(RegionObservability::kNotObservable), effect);
+
+    Node* value = effect =
+        graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                         jsgraph()->Constant(size), effect, control);
+    effect =
+        graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
+                         value, jsgraph()->HeapConstant(map), effect, control);
+
+    // Initialize FixedArray fields.
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForFixedArrayLength()), value,
+        jsgraph()->SmiConstant(length), effect, control);
+    // Initialize HashTable fields.
+    effect =
+        graph()->NewNode(simplified()->StoreField(
+                             AccessBuilder::ForHashTableBaseNumberOfElements()),
+                         value, jsgraph()->SmiConstant(0), effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForHashTableBaseNumberOfDeletedElement()),
+        value, jsgraph()->SmiConstant(0), effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForHashTableBaseCapacity()),
+        value, jsgraph()->SmiConstant(capacity), effect, control);
+    // Initialize Dictionary fields.
+    Node* undefined = jsgraph()->UndefinedConstant();
+    effect = graph()->NewNode(
+        simplified()->StoreField(AccessBuilder::ForDictionaryMaxNumberKey()),
+        value, undefined, effect, control);
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForDictionaryNextEnumerationIndex()),
+        value, jsgraph()->SmiConstant(PropertyDetails::kInitialIndex), effect,
+        control);
+    // Initialize the remaining NameDictionary slots with undefined.
+    for (int index = NameDictionary::kNextEnumerationIndexIndex + 1;
+         index < length; index++) {
+      effect = graph()->NewNode(
+          simplified()->StoreField(
+              AccessBuilder::ForFixedArraySlot(index, kNoWriteBarrier)),
+          value, undefined, effect, control);
+    }
+    properties = effect =
+        graph()->NewNode(common()->FinishRegion(), value, effect);
+  }
+
+  int const instance_size = instance_map->instance_size();
+  if (instance_size > kMaxRegularHeapObjectSize) return NoChange();
+  dependencies()->AssumeInitialMapCantChange(instance_map);
+
+  // Emit code to allocate the JSObject instance for the given
+  // {instance_map}.
+  effect = graph()->NewNode(
+      common()->BeginRegion(RegionObservability::kNotObservable), effect);
+  Node* value = effect =
+      graph()->NewNode(simplified()->Allocate(NOT_TENURED),
+                       jsgraph()->Constant(instance_size), effect, control);
+  effect =
+      graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()), value,
+                       jsgraph()->HeapConstant(instance_map), effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectProperties()), value,
+      properties, effect, control);
+  effect = graph()->NewNode(
+      simplified()->StoreField(AccessBuilder::ForJSObjectElements()), value,
+      jsgraph()->EmptyFixedArrayConstant(), effect, control);
+  // Initialize Object fields.
+  Node* undefined = jsgraph()->UndefinedConstant();
+  for (int offset = JSObject::kHeaderSize; offset < instance_size;
+       offset += kPointerSize) {
+    effect = graph()->NewNode(
+        simplified()->StoreField(
+            AccessBuilder::ForJSObjectOffset(offset, kNoWriteBarrier)),
+        value, undefined, effect, control);
+  }
+  value = effect = graph()->NewNode(common()->FinishRegion(), value, effect);
+
+  // Replace {node} with the freshly allocated and initialized object.
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
 // ES6 section 21.1.2.1 String.fromCharCode ( ...codeUnits )
 Reduction JSBuiltinReducer::ReduceStringFromCharCode(Node* node) {
   JSCallReduction r(node);
@@ -1509,7 +1609,7 @@
   // the lowering below.
   for (Node* dominator = effect;;) {
     if (dominator->opcode() == IrOpcode::kCheckString &&
-        IsSame(dominator->InputAt(0), receiver)) {
+        NodeProperties::IsSame(dominator->InputAt(0), receiver)) {
       return dominator;
     }
     if (dominator->op()->EffectInputCount() != 1) {
@@ -1531,8 +1631,17 @@
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* control = NodeProperties::GetControlInput(node);
 
-    if (index_type->Is(Type::Unsigned32())) {
+    if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
       if (Node* receiver = GetStringWitness(node)) {
+        if (!index_type->Is(Type::Unsigned32())) {
+          // Map -0 and NaN to 0 (as per ToInteger), and the values in
+          // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+          // be considered out-of-bounds as well, because of the maximal
+          // String length limit in V8.
+          STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+          index = graph()->NewNode(simplified()->NumberToUint32(), index);
+        }
+
         // Determine the {receiver} length.
         Node* receiver_length = effect = graph()->NewNode(
             simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1544,16 +1653,10 @@
         Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
                                         check, control);
 
+        // Return the character from the {receiver} as single character string.
         Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-        Node* vtrue;
-        {
-          // Load the character from the {receiver}.
-          vtrue = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
-                                   index, if_true);
-
-          // Return it as single character string.
-          vtrue = graph()->NewNode(simplified()->StringFromCharCode(), vtrue);
-        }
+        Node* vtrue = graph()->NewNode(simplified()->StringCharAt(), receiver,
+                                       index, if_true);
 
         // Return the empty string otherwise.
         Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -1582,8 +1685,17 @@
     Node* effect = NodeProperties::GetEffectInput(node);
     Node* control = NodeProperties::GetControlInput(node);
 
-    if (index_type->Is(Type::Unsigned32())) {
+    if (index_type->Is(Type::Integral32OrMinusZeroOrNaN())) {
       if (Node* receiver = GetStringWitness(node)) {
+        if (!index_type->Is(Type::Unsigned32())) {
+          // Map -0 and NaN to 0 (as per ToInteger), and the values in
+          // the [-2^31,-1] range to the [2^31,2^32-1] range, which will
+          // be considered out-of-bounds as well, because of the maximal
+          // String length limit in V8.
+          STATIC_ASSERT(String::kMaxLength <= kMaxInt);
+          index = graph()->NewNode(simplified()->NumberToUint32(), index);
+        }
+
         // Determine the {receiver} length.
         Node* receiver_length = effect = graph()->NewNode(
             simplified()->LoadField(AccessBuilder::ForStringLength()), receiver,
@@ -1618,6 +1730,34 @@
   return NoChange();
 }
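// Worked example (self-contained sketch) of the index normalization used in
// ReduceStringCharAt and ReduceStringCharCodeAt above: NumberToUint32 maps
// -0 and NaN to 0, and negative int32 values into [2^31, 2^32-1], which
// always exceeds String::kMaxLength and therefore fails the bounds check.
#include <cstdint>
#include <cmath>

uint32_t NormalizeIndex(double index) {  // assumes an integral, -0 or NaN input
  if (std::isnan(index)) return 0;       // NaN -> 0; -0.0 also ends up as 0
  return static_cast<uint32_t>(static_cast<int64_t>(index));  // -1 -> 0xFFFFFFFF
}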
 
+// ES6 String.prototype.indexOf(searchString [, position])
+// #sec-string.prototype.indexof
+Reduction JSBuiltinReducer::ReduceStringIndexOf(Node* node) {
+  // We need at least target, receiver and search_string parameters.
+  if (node->op()->ValueInputCount() >= 3) {
+    Node* search_string = NodeProperties::GetValueInput(node, 2);
+    Type* search_string_type = NodeProperties::GetType(search_string);
+    Node* position = (node->op()->ValueInputCount() >= 4)
+                         ? NodeProperties::GetValueInput(node, 3)
+                         : jsgraph()->ZeroConstant();
+    Type* position_type = NodeProperties::GetType(position);
+
+    if (search_string_type->Is(Type::String()) &&
+        position_type->Is(Type::SignedSmall())) {
+      if (Node* receiver = GetStringWitness(node)) {
+        RelaxEffectsAndControls(node);
+        node->ReplaceInput(0, receiver);
+        node->ReplaceInput(1, search_string);
+        node->ReplaceInput(2, position);
+        node->TrimInputCount(3);
+        NodeProperties::ChangeOp(node, simplified()->StringIndexOf());
+        return Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
+
 Reduction JSBuiltinReducer::ReduceStringIterator(Node* node) {
   if (Node* receiver = GetStringWitness(node)) {
     Node* effect = NodeProperties::GetEffectInput(node);
@@ -1632,6 +1772,7 @@
     Node* value = effect = graph()->NewNode(
         simplified()->Allocate(NOT_TENURED),
         jsgraph()->Constant(JSStringIterator::kSize), effect, control);
+    NodeProperties::SetType(value, Type::OtherObject());
     effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
                               value, map, effect, control);
     effect = graph()->NewNode(
@@ -1805,21 +1946,29 @@
   Node* control = NodeProperties::GetControlInput(node);
   if (HasInstanceTypeWitness(receiver, effect, instance_type)) {
     // Load the {receiver}s field.
-    Node* receiver_value = effect = graph()->NewNode(
-        simplified()->LoadField(access), receiver, effect, control);
+    Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+                                            receiver, effect, control);
 
-    // Check if the {receiver}s buffer was neutered.
-    Node* receiver_buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
-    Node* check = effect =
-        graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
-                         receiver_buffer, effect, control);
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Check if the {receiver}s buffer was neutered.
+      Node* receiver_buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
+      Node* check = effect =
+          graph()->NewNode(simplified()->ArrayBufferWasNeutered(),
+                           receiver_buffer, effect, control);
 
-    // Default to zero if the {receiver}s buffer was neutered.
-    Node* value = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-        check, jsgraph()->ZeroConstant(), receiver_value);
+      // Default to zero if the {receiver}s buffer was neutered.
+      value = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+          check, jsgraph()->ZeroConstant(), value);
+    }
 
     ReplaceWithValue(node, value, effect, control);
     return Replace(value);
@@ -1846,11 +1995,10 @@
       return ReduceArrayPop(node);
     case kArrayPush:
       return ReduceArrayPush(node);
+    case kDateNow:
+      return ReduceDateNow(node);
     case kDateGetTime:
       return ReduceDateGetTime(node);
-    case kFunctionHasInstance:
-      return ReduceFunctionHasInstance(node);
-      break;
     case kGlobalIsFinite:
       reduction = ReduceGlobalIsFinite(node);
       break;
@@ -1971,6 +2119,9 @@
     case kNumberParseInt:
       reduction = ReduceNumberParseInt(node);
       break;
+    case kObjectCreate:
+      reduction = ReduceObjectCreate(node);
+      break;
     case kStringFromCharCode:
       reduction = ReduceStringFromCharCode(node);
       break;
@@ -1978,6 +2129,8 @@
       return ReduceStringCharAt(node);
     case kStringCharCodeAt:
       return ReduceStringCharCodeAt(node);
+    case kStringIndexOf:
+      return ReduceStringIndexOf(node);
     case kStringIterator:
       return ReduceStringIterator(node);
     case kStringIteratorNext:
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index 4af3084..6ff06e3 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -57,8 +57,8 @@
                                          IterationKind kind);
   Reduction ReduceArrayPop(Node* node);
   Reduction ReduceArrayPush(Node* node);
+  Reduction ReduceDateNow(Node* node);
   Reduction ReduceDateGetTime(Node* node);
-  Reduction ReduceFunctionHasInstance(Node* node);
   Reduction ReduceGlobalIsFinite(Node* node);
   Reduction ReduceGlobalIsNaN(Node* node);
   Reduction ReduceMathAbs(Node* node);
@@ -99,9 +99,11 @@
   Reduction ReduceNumberIsNaN(Node* node);
   Reduction ReduceNumberIsSafeInteger(Node* node);
   Reduction ReduceNumberParseInt(Node* node);
+  Reduction ReduceObjectCreate(Node* node);
   Reduction ReduceStringCharAt(Node* node);
   Reduction ReduceStringCharCodeAt(Node* node);
   Reduction ReduceStringFromCharCode(Node* node);
+  Reduction ReduceStringIndexOf(Node* node);
   Reduction ReduceStringIterator(Node* node);
   Reduction ReduceStringIteratorNext(Node* node);
   Reduction ReduceArrayBufferViewAccessor(Node* node,
diff --git a/src/compiler/js-call-reducer.cc b/src/compiler/js-call-reducer.cc
index e48fce9..c0deb91 100644
--- a/src/compiler/js-call-reducer.cc
+++ b/src/compiler/js-call-reducer.cc
@@ -4,11 +4,15 @@
 
 #include "src/compiler/js-call-reducer.h"
 
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/compilation-dependencies.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/feedback-vector-inl.h"
 #include "src/objects-inl.h"
-#include "src/type-feedback-vector-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,10 +20,14 @@
 
 Reduction JSCallReducer::Reduce(Node* node) {
   switch (node->opcode()) {
-    case IrOpcode::kJSCallConstruct:
-      return ReduceJSCallConstruct(node);
-    case IrOpcode::kJSCallFunction:
-      return ReduceJSCallFunction(node);
+    case IrOpcode::kJSConstruct:
+      return ReduceJSConstruct(node);
+    case IrOpcode::kJSConstructWithSpread:
+      return ReduceJSConstructWithSpread(node);
+    case IrOpcode::kJSCall:
+      return ReduceJSCall(node);
+    case IrOpcode::kJSCallWithSpread:
+      return ReduceJSCallWithSpread(node);
     default:
       break;
   }
@@ -29,9 +37,9 @@
 
 // ES6 section 22.1.1 The Array Constructor
 Reduction JSCallReducer::ReduceArrayConstructor(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  CallParameters const& p = CallParametersOf(node->op());
 
   // Check if we have an allocation site from the CallIC.
   Handle<AllocationSite> site;
@@ -58,8 +66,8 @@
 
 // ES6 section 20.1.1 The Number Constructor
 Reduction JSCallReducer::ReduceNumberConstructor(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
 
   // Turn the {node} into a {JSToNumber} call.
   DCHECK_LE(2u, p.arity());
@@ -73,9 +81,13 @@
 
 // ES6 section 19.2.3.1 Function.prototype.apply ( thisArg, argArray )
 Reduction JSCallReducer::ReduceFunctionPrototypeApply(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
   Node* target = NodeProperties::GetValueInput(node, 0);
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  CallParameters const& p = CallParametersOf(node->op());
+  // Tail calls to Function.prototype.apply are not properly supported
+  // down the pipeline, so we disable this optimization completely for
+  // tail calls (for now).
+  if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
   Handle<JSFunction> apply =
       Handle<JSFunction>::cast(HeapObjectMatcher(target).Value());
   size_t arity = p.arity();
@@ -101,35 +113,65 @@
       if (edge.from() == node) continue;
       return NoChange();
     }
+    // Check if the arguments can be handled in the fast case (i.e. we don't
+    // have aliased sloppy arguments), and compute the {start_index} for
+    // rest parameters.
+    CreateArgumentsType const type = CreateArgumentsTypeOf(arg_array->op());
+    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
+    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+    int formal_parameter_count;
+    int start_index = 0;
+    {
+      Handle<SharedFunctionInfo> shared;
+      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+      formal_parameter_count = shared->internal_formal_parameter_count();
+    }
+    if (type == CreateArgumentsType::kMappedArguments) {
+      // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+      if (formal_parameter_count != 0) return NoChange();
+    } else if (type == CreateArgumentsType::kRestParameter) {
+      start_index = formal_parameter_count;
+    }
+    // Check if we are applying to inlined arguments or to the arguments of
+    // the outermost function.
+    Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+    if (outer_state->opcode() != IrOpcode::kFrameState) {
+      // TODO(jarin,bmeurer): Support the NewUnmappedArgumentsElement and
+      // NewRestParameterElements in the EscapeAnalysis and Deoptimizer
+      // instead, then we don't need this hack.
+      // This only works with zero formal parameters because the deoptimizer
+      // does not support anything else yet.
+      if (type != CreateArgumentsType::kRestParameter &&
+          formal_parameter_count == 0) {
+        // There are no other uses of the {arg_array} except in StateValues,
+        // so we just replace {arg_array} with a marker for the Deoptimizer
+        // that this refers to the arguments object.
+        Node* arguments = graph()->NewNode(common()->ArgumentsObjectState());
+        ReplaceWithValue(arg_array, arguments);
+      }
+
+      // Reduce {node} to a JSCallForwardVarargs operation, which just
+      // re-pushes the incoming arguments and calls the {target}.
+      node->RemoveInput(0);  // Function.prototype.apply
+      node->RemoveInput(2);  // arguments
+      NodeProperties::ChangeOp(node, javascript()->CallForwardVarargs(
+                                         start_index, p.tail_call_mode()));
+      return Changed(node);
+    }
     // Get to the actual frame state from which to extract the arguments;
     // we can only optimize this in case the {node} was already inlined into
     // some other function (and same for the {arg_array}).
-    CreateArgumentsType type = CreateArgumentsTypeOf(arg_array->op());
-    Node* frame_state = NodeProperties::GetFrameStateInput(arg_array);
-    Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
-    if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
     FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
     if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
       // Need to take the parameters from the arguments adaptor.
       frame_state = outer_state;
     }
-    FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
-    int start_index = 0;
-    if (type == CreateArgumentsType::kMappedArguments) {
-      // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      if (shared->internal_formal_parameter_count() != 0) return NoChange();
-    } else if (type == CreateArgumentsType::kRestParameter) {
-      Handle<SharedFunctionInfo> shared;
-      if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
-      start_index = shared->internal_formal_parameter_count();
-    }
     // Remove the argArray input from the {node}.
     node->RemoveInput(static_cast<int>(--arity));
-    // Add the actual parameters to the {node}, skipping the receiver.
+    // Add the actual parameters to the {node}, skipping the receiver,
+    // starting from {start_index}.
     Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
-    for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+    for (int i = start_index + 1; i < parameters->InputCount(); ++i) {
       node->InsertInput(graph()->zone(), static_cast<int>(arity),
                         parameters->InputAt(i));
       ++arity;
@@ -140,24 +182,25 @@
   } else {
     return NoChange();
   }
-  // Change {node} to the new {JSCallFunction} operator.
+  // Change {node} to the new {JSCall} operator.
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
-                                       convert_mode, p.tail_call_mode()));
+      node,
+      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+                         p.tail_call_mode()));
   // Change context of {node} to the Function.prototype.apply context,
   // to ensure any exception is thrown in the correct context.
   NodeProperties::ReplaceContextInput(
       node, jsgraph()->HeapConstant(handle(apply->context(), isolate())));
-  // Try to further reduce the JSCallFunction {node}.
-  Reduction const reduction = ReduceJSCallFunction(node);
+  // Try to further reduce the JSCall {node}.
+  Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
 }
 
 
 // ES6 section 19.2.3.3 Function.prototype.call (thisArg, ...args)
 Reduction JSCallReducer::ReduceFunctionPrototypeCall(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   Handle<JSFunction> call = Handle<JSFunction>::cast(
       HeapObjectMatcher(NodeProperties::GetValueInput(node, 0)).Value());
   // Change context of {node} to the Function.prototype.call context,
@@ -182,83 +225,276 @@
     --arity;
   }
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, p.frequency(), VectorSlotPair(),
-                                       convert_mode, p.tail_call_mode()));
-  // Try to further reduce the JSCallFunction {node}.
-  Reduction const reduction = ReduceJSCallFunction(node);
+      node,
+      javascript()->Call(arity, p.frequency(), VectorSlotPair(), convert_mode,
+                         p.tail_call_mode()));
+  // Try to further reduce the JSCall {node}.
+  Reduction const reduction = ReduceJSCall(node);
   return reduction.Changed() ? reduction : Changed(node);
 }
 
+// ES6 section 19.2.3.6 Function.prototype [ @@hasInstance ] (V)
+Reduction JSCallReducer::ReduceFunctionPrototypeHasInstance(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* object = (node->op()->ValueInputCount() >= 3)
+                     ? NodeProperties::GetValueInput(node, 2)
+                     : jsgraph()->UndefinedConstant();
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // TODO(turbofan): If JSOrdinaryToInstance raises an exception, the
+  // stack trace doesn't contain the @@hasInstance call; we have the
+  // corresponding bug in the baseline case. Some massaging of the frame
+  // state would be necessary here.
+
+  // Morph this {node} into a JSOrdinaryHasInstance node.
+  node->ReplaceInput(0, receiver);
+  node->ReplaceInput(1, object);
+  node->ReplaceInput(2, context);
+  node->ReplaceInput(3, frame_state);
+  node->ReplaceInput(4, effect);
+  node->ReplaceInput(5, control);
+  node->TrimInputCount(6);
+  NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
+  return Changed(node);
+}
+
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
+bool CanInlineApiCall(Isolate* isolate, Node* node,
+                      Handle<FunctionTemplateInfo> function_template_info) {
+  DCHECK(node->opcode() == IrOpcode::kJSCall);
+  if (V8_UNLIKELY(FLAG_runtime_stats)) return false;
+  if (function_template_info->call_code()->IsUndefined(isolate)) {
+    return false;
   }
-  return false;
-}
-
-// TODO(turbofan): Share with similar functionality in JSInliningHeuristic
-// and JSNativeContextSpecialization, i.e. move to NodeProperties helper?!
-MaybeHandle<Map> InferReceiverMap(Node* node) {
-  Node* receiver = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  // Check if the {node} is dominated by a CheckMaps with a single map
-  // for the {receiver}, and if so use that map for the lowering below.
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      if (dominator->op()->ValueInputCount() == 2) {
-        HeapObjectMatcher m(dominator->InputAt(1));
-        if (m.HasValue()) return Handle<Map>::cast(m.Value());
-      }
-      return MaybeHandle<Map>();
-    }
-    if (dominator->op()->EffectInputCount() != 1) {
-      // Didn't find any appropriate CheckMaps node.
-      return MaybeHandle<Map>();
-    }
-    dominator = NodeProperties::GetEffectInput(dominator);
+  CallParameters const& params = CallParametersOf(node->op());
+  // CallApiCallbackStub expects the target in a register, so we count it out,
+  // and counts the receiver as an implicit argument, so we count the receiver
+  // out too.
+  int const argc = static_cast<int>(params.arity()) - 2;
+  if (argc > CallApiCallbackStub::kArgMax || !params.feedback().IsValid()) {
+    return false;
   }
+  HeapObjectMatcher receiver(NodeProperties::GetValueInput(node, 1));
+  if (!receiver.HasValue()) {
+    return false;
+  }
+  return receiver.Value()->IsUndefined(isolate) ||
+         (receiver.Value()->map()->IsJSObjectMap() &&
+          !receiver.Value()->map()->is_access_check_needed());
 }
 
 }  // namespace
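// Worked example of the arity bookkeeping in CanInlineApiCall (illustrative):
// for a call f(a, b, c) the JSCall node's value inputs are
// [target, receiver, a, b, c], so params.arity() == 5 while the stub-visible
// argument count is 5 - 2 == 3.
constexpr int StubArgc(int call_arity) { return call_arity - 2; }
static_assert(StubArgc(5) == 3, "f(a, b, c) has three stub-visible arguments");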
 
+JSCallReducer::HolderLookup JSCallReducer::LookupHolder(
+    Handle<JSObject> object,
+    Handle<FunctionTemplateInfo> function_template_info,
+    Handle<JSObject>* holder) {
+  DCHECK(object->map()->IsJSObjectMap());
+  Handle<Map> object_map(object->map());
+  Handle<FunctionTemplateInfo> expected_receiver_type;
+  if (!function_template_info->signature()->IsUndefined(isolate())) {
+    expected_receiver_type =
+        handle(FunctionTemplateInfo::cast(function_template_info->signature()));
+  }
+  if (expected_receiver_type.is_null() ||
+      expected_receiver_type->IsTemplateFor(*object_map)) {
+    *holder = Handle<JSObject>::null();
+    return kHolderIsReceiver;
+  }
+  while (object_map->has_hidden_prototype()) {
+    Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+    object_map = handle(prototype->map());
+    if (expected_receiver_type->IsTemplateFor(*object_map)) {
+      *holder = prototype;
+      return kHolderFound;
+    }
+  }
+  return kHolderNotFound;
+}
+
 // ES6 section B.2.2.1.1 get Object.prototype.__proto__
 Reduction JSCallReducer::ReduceObjectPrototypeGetProto(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  Node* receiver = NodeProperties::GetValueInput(node, 1);
+  Node* effect = NodeProperties::GetEffectInput(node);
 
   // Try to determine the {receiver} map.
-  Handle<Map> receiver_map;
-  if (InferReceiverMap(node).ToHandle(&receiver_map)) {
-    // Check if we can constant-fold the {receiver} map.
-    if (!receiver_map->IsJSProxyMap() &&
-        !receiver_map->has_hidden_prototype() &&
-        !receiver_map->is_access_check_needed()) {
-      Handle<Object> receiver_prototype(receiver_map->prototype(), isolate());
-      Node* value = jsgraph()->Constant(receiver_prototype);
-      ReplaceWithValue(node, value);
-      return Replace(value);
+  ZoneHandleSet<Map> receiver_maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &receiver_maps);
+  if (result == NodeProperties::kReliableReceiverMaps) {
+    Handle<Map> candidate_map(
+        receiver_maps[0]->GetPrototypeChainRootMap(isolate()));
+    Handle<Object> candidate_prototype(candidate_map->prototype(), isolate());
+
+    // Check if we can constant-fold the {candidate_prototype}.
+    for (size_t i = 0; i < receiver_maps.size(); ++i) {
+      Handle<Map> const receiver_map(
+          receiver_maps[i]->GetPrototypeChainRootMap(isolate()));
+      if (receiver_map->IsJSProxyMap() ||
+          receiver_map->has_hidden_prototype() ||
+          receiver_map->is_access_check_needed() ||
+          receiver_map->prototype() != *candidate_prototype) {
+        return NoChange();
+      }
     }
+    Node* value = jsgraph()->Constant(candidate_prototype);
+    ReplaceWithValue(node, value);
+    return Replace(value);
   }
 
   return NoChange();
 }
 
-Reduction JSCallReducer::ReduceJSCallFunction(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+Reduction JSCallReducer::ReduceCallApiFunction(
+    Node* node, Node* target,
+    Handle<FunctionTemplateInfo> function_template_info) {
+  Isolate* isolate = this->isolate();
+  CHECK(!isolate->serializer_enabled());
+  HeapObjectMatcher m(target);
+  DCHECK(m.HasValue() && m.Value()->IsJSFunction());
+  if (!CanInlineApiCall(isolate, node, function_template_info)) {
+    return NoChange();
+  }
+  Handle<CallHandlerInfo> call_handler_info(
+      handle(CallHandlerInfo::cast(function_template_info->call_code())));
+  Handle<Object> data(call_handler_info->data(), isolate);
+
+  Node* receiver_node = NodeProperties::GetValueInput(node, 1);
+  CallParameters const& params = CallParametersOf(node->op());
+
+  Handle<HeapObject> receiver = HeapObjectMatcher(receiver_node).Value();
+  bool const receiver_is_undefined = receiver->IsUndefined(isolate);
+  if (receiver_is_undefined) {
+    receiver = handle(Handle<JSFunction>::cast(m.Value())->global_proxy());
+  } else {
+    DCHECK(receiver->map()->IsJSObjectMap() &&
+           !receiver->map()->is_access_check_needed());
+  }
+
+  Handle<JSObject> holder;
+  HolderLookup lookup = LookupHolder(Handle<JSObject>::cast(receiver),
+                                     function_template_info, &holder);
+  if (lookup == kHolderNotFound) return NoChange();
+  if (receiver_is_undefined) {
+    receiver_node = jsgraph()->HeapConstant(receiver);
+    NodeProperties::ReplaceValueInput(node, receiver_node, 1);
+  }
+  Node* holder_node =
+      lookup == kHolderFound ? jsgraph()->HeapConstant(holder) : receiver_node;
+
+  Zone* zone = graph()->zone();
+  // Same as CanInlineApiCall: exclude the target (which goes in a register) and
+  // the receiver (which is implicitly counted by CallApiCallbackStub) from the
+  // arguments count.
+  int const argc = static_cast<int>(params.arity() - 2);
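+  // The stub parameters are the argument count, whether the call data is
+  // undefined, and (here) a non-lazy flag.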
+  CallApiCallbackStub stub(isolate, argc, data->IsUndefined(isolate), false);
+  CallInterfaceDescriptor cid = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
+      isolate, zone, cid,
+      cid.GetStackParameterCount() + argc + 1 /* implicit receiver */,
+      CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
+      MachineType::AnyTagged(), 1);
+  ApiFunction api_function(v8::ToCData<Address>(call_handler_info->callback()));
+  ExternalReference function_reference(
+      &api_function, ExternalReference::DIRECT_API_CALL, isolate);
+
+  // CallApiCallbackStub's register arguments: code, target, call data, holder,
+  // function address.
+  node->InsertInput(zone, 0, jsgraph()->HeapConstant(stub.GetCode()));
+  node->InsertInput(zone, 2, jsgraph()->Constant(data));
+  node->InsertInput(zone, 3, holder_node);
+  node->InsertInput(zone, 4, jsgraph()->ExternalConstant(function_reference));
+  NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
+  return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceSpreadCall(Node* node, int arity) {
+  DCHECK(node->opcode() == IrOpcode::kJSCallWithSpread ||
+         node->opcode() == IrOpcode::kJSConstructWithSpread);
+
+  // Check that we can actually avoid iteration, which requires the initial
+  // array iterator prototype map to still be stable.
+  if (!isolate()->initial_array_iterator_prototype_map()->is_stable()) {
+    return NoChange();
+  }
+
+  Node* spread = NodeProperties::GetValueInput(node, arity);
+
+  // Check that {spread} is an arguments object, and that {node} is its only
+  // value user (ignoring value uses in frame states).
+  if (spread->opcode() != IrOpcode::kJSCreateArguments) return NoChange();
+  for (Edge edge : spread->use_edges()) {
+    if (edge.from()->opcode() == IrOpcode::kStateValues) continue;
+    if (!NodeProperties::IsValueEdge(edge)) continue;
+    if (edge.from() == node) continue;
+    return NoChange();
+  }
+
+  // Get to the actual frame state from which to extract the arguments;
+  // we can only optimize this if the {node} was already inlined into
+  // some other function (and the same holds for the {spread}).
+  CreateArgumentsType type = CreateArgumentsTypeOf(spread->op());
+  Node* frame_state = NodeProperties::GetFrameStateInput(spread);
+  Node* outer_state = frame_state->InputAt(kFrameStateOuterStateInput);
+  if (outer_state->opcode() != IrOpcode::kFrameState) return NoChange();
+  FrameStateInfo outer_info = OpParameter<FrameStateInfo>(outer_state);
+  if (outer_info.type() == FrameStateType::kArgumentsAdaptor) {
+    // Need to take the parameters from the arguments adaptor.
+    frame_state = outer_state;
+  }
+  FrameStateInfo state_info = OpParameter<FrameStateInfo>(frame_state);
+  int start_index = 0;
+  if (type == CreateArgumentsType::kMappedArguments) {
+    // Mapped arguments (sloppy mode) cannot be handled if they are aliased.
+    Handle<SharedFunctionInfo> shared;
+    if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+    if (shared->internal_formal_parameter_count() != 0) return NoChange();
+  } else if (type == CreateArgumentsType::kRestParameter) {
+    Handle<SharedFunctionInfo> shared;
+    if (!state_info.shared_info().ToHandle(&shared)) return NoChange();
+    start_index = shared->internal_formal_parameter_count();
+
+    // Only check the array iterator protector when we have a rest parameter.
+    if (!isolate()->IsArrayIteratorLookupChainIntact()) return NoChange();
+    // Add a code dependency on the array iterator protector.
+    dependencies()->AssumePropertyCell(factory()->array_iterator_protector());
+  }
+
+  dependencies()->AssumeMapStable(
+      isolate()->initial_array_iterator_prototype_map());
+
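+  // Remove the spread input itself; the unpacked arguments are added below.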
+  node->RemoveInput(arity--);
+
+  // Add the actual parameters to the {node}, skipping the receiver.
+  Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
+  for (int i = start_index + 1; i < state_info.parameter_count(); ++i) {
+    node->InsertInput(graph()->zone(), static_cast<int>(++arity),
+                      parameters->InputAt(i));
+  }
+
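+  // Turn the spread call/construct into a regular one over the unpacked
+  // arguments; the second operand is the assumed call frequency.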
+  if (node->opcode() == IrOpcode::kJSCallWithSpread) {
+    NodeProperties::ChangeOp(
+        node, javascript()->Call(arity + 1, 7, VectorSlotPair()));
+  } else {
+    NodeProperties::ChangeOp(
+        node, javascript()->Construct(arity + 2, 7, VectorSlotPair()));
+  }
+  return Changed(node);
+}
+
+Reduction JSCallReducer::ReduceJSCall(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   Node* target = NodeProperties::GetValueInput(node, 0);
   Node* control = NodeProperties::GetControlInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
 
-  // Try to specialize JSCallFunction {node}s with constant {target}s.
+  // Try to specialize JSCall {node}s with constant {target}s.
   HeapObjectMatcher m(target);
   if (m.HasValue()) {
     if (m.Value()->IsJSFunction()) {
@@ -274,12 +510,17 @@
         return Changed(node);
       }
 
+      // Don't inline across native contexts.
+      if (function->native_context() != *native_context()) return NoChange();
+
       // Check for known builtin functions.
       switch (shared->code()->builtin_index()) {
         case Builtins::kFunctionPrototypeApply:
           return ReduceFunctionPrototypeApply(node);
         case Builtins::kFunctionPrototypeCall:
           return ReduceFunctionPrototypeCall(node);
+        case Builtins::kFunctionPrototypeHasInstance:
+          return ReduceFunctionPrototypeHasInstance(node);
         case Builtins::kNumberConstructor:
           return ReduceNumberConstructor(node);
         case Builtins::kObjectPrototypeGetProto:
@@ -292,6 +533,12 @@
       if (*function == function->native_context()->array_function()) {
         return ReduceArrayConstructor(node);
       }
+
+      if (shared->IsApiFunction()) {
+        return ReduceCallApiFunction(
+            node, target,
+            handle(FunctionTemplateInfo::cast(shared->function_data())));
+      }
     } else if (m.Value()->IsJSBoundFunction()) {
       Handle<JSBoundFunction> function =
           Handle<JSBoundFunction>::cast(m.Value());
@@ -300,9 +547,9 @@
       Handle<Object> bound_this(function->bound_this(), isolate());
       Handle<FixedArray> bound_arguments(function->bound_arguments(),
                                          isolate());
-      CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+      CallParameters const& p = CallParametersOf(node->op());
       ConvertReceiverMode const convert_mode =
-          (bound_this->IsNull(isolate()) || bound_this->IsUndefined(isolate()))
+          (bound_this->IsNullOrUndefined(isolate()))
               ? ConvertReceiverMode::kNullOrUndefined
               : ConvertReceiverMode::kNotNullOrUndefined;
       size_t arity = p.arity();
@@ -319,11 +566,12 @@
             jsgraph()->Constant(handle(bound_arguments->get(i), isolate())));
         arity++;
       }
-      NodeProperties::ChangeOp(node, javascript()->CallFunction(
-                                         arity, p.frequency(), VectorSlotPair(),
-                                         convert_mode, p.tail_call_mode()));
-      // Try to further reduce the JSCallFunction {node}.
-      Reduction const reduction = ReduceJSCallFunction(node);
+      NodeProperties::ChangeOp(
+          node,
+          javascript()->Call(arity, p.frequency(), VectorSlotPair(),
+                             convert_mode, p.tail_call_mode()));
+      // Try to further reduce the JSCall {node}.
+      Reduction const reduction = ReduceJSCall(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
 
@@ -332,26 +580,36 @@
     return NoChange();
   }
 
-  // Not much we can do if deoptimization support is disabled.
-  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
-
   // Extract feedback from the {node} using the CallICNexus.
   if (!p.feedback().IsValid()) return NoChange();
   CallICNexus nexus(p.feedback().vector(), p.feedback().slot());
-  if (nexus.IsUninitialized() && (flags() & kBailoutOnUninitialized)) {
-    Node* frame_state = NodeProperties::FindFrameStateBefore(node);
-    Node* deoptimize = graph()->NewNode(
-        common()->Deoptimize(
-            DeoptimizeKind::kSoft,
-            DeoptimizeReason::kInsufficientTypeFeedbackForCall),
-        frame_state, effect, control);
-    // TODO(bmeurer): This should be on the AdvancedReducer somehow.
-    NodeProperties::MergeControlToEnd(graph(), common(), deoptimize);
-    Revisit(graph()->end());
-    node->TrimInputCount(0);
-    NodeProperties::ChangeOp(node, common()->Dead());
+  if (nexus.IsUninitialized()) {
+    // TODO(turbofan): Tail-calling to a CallIC stub is not supported.
+    if (p.tail_call_mode() == TailCallMode::kAllow) return NoChange();
+
+    // Insert a CallIC here to collect feedback for uninitialized calls.
+    int const arg_count = static_cast<int>(p.arity() - 2);
+    Callable callable = CodeFactory::CallIC(isolate(), p.convert_mode());
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
+        isolate(), graph()->zone(), callable.descriptor(), arg_count + 1,
+        flags);
+    Node* stub_code = jsgraph()->HeapConstant(callable.code());
+    Node* stub_arity = jsgraph()->Constant(arg_count);
+    Node* slot_index =
+        jsgraph()->Constant(FeedbackVector::GetIndex(p.feedback().slot()));
+    Node* feedback_vector = jsgraph()->HeapConstant(p.feedback().vector());
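+    // The CallIC stub expects: code, target, argument count, slot index and
+    // feedback vector, followed by the receiver and arguments.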
+    node->InsertInput(graph()->zone(), 0, stub_code);
+    node->InsertInput(graph()->zone(), 2, stub_arity);
+    node->InsertInput(graph()->zone(), 3, slot_index);
+    node->InsertInput(graph()->zone(), 4, feedback_vector);
+    NodeProperties::ChangeOp(node, common()->Call(desc));
     return Changed(node);
   }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
   Handle<Object> feedback(nexus.GetFeedback(), isolate());
   if (feedback->IsAllocationSite()) {
     // Retrieve the Array function from the {node}.
@@ -379,22 +637,30 @@
       effect =
           graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
-      // Specialize the JSCallFunction node to the {target_function}.
+      // Specialize the JSCall node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
 
-      // Try to further reduce the JSCallFunction {node}.
-      Reduction const reduction = ReduceJSCallFunction(node);
+      // Try to further reduce the JSCall {node}.
+      Reduction const reduction = ReduceJSCall(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
   }
   return NoChange();
 }
 
+Reduction JSCallReducer::ReduceJSCallWithSpread(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCallWithSpread, node->opcode());
+  CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+  DCHECK_LE(3u, p.arity());
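+  // The spread is the last value input of a JSCallWithSpread node, at index
+  // arity() - 1.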
+  int arity = static_cast<int>(p.arity() - 1);
 
-Reduction JSCallReducer::ReduceJSCallConstruct(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+  return ReduceSpreadCall(node, arity);
+}
+
+Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   DCHECK_LE(2u, p.arity());
   int const arity = static_cast<int>(p.arity() - 2);
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -402,7 +668,7 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  // Try to specialize JSCallConstruct {node}s with constant {target}s.
+  // Try to specialize JSConstruct {node}s with constant {target}s.
   HeapObjectMatcher m(target);
   if (m.HasValue()) {
     if (m.Value()->IsJSFunction()) {
@@ -412,10 +678,14 @@
       if (!function->IsConstructor()) {
         NodeProperties::ReplaceValueInputs(node, target);
         NodeProperties::ChangeOp(
-            node, javascript()->CallRuntime(Runtime::kThrowCalledNonCallable));
+            node, javascript()->CallRuntime(
+                      Runtime::kThrowConstructedNonConstructable));
         return Changed(node);
       }
 
+      // Don't inline across native contexts.
+      if (function->native_context() != *native_context()) return NoChange();
+
       // Check for the ArrayConstructor.
       if (*function == function->native_context()->array_function()) {
         // Check if we have an allocation site.
@@ -487,15 +757,15 @@
       effect =
           graph()->NewNode(simplified()->CheckIf(), check, effect, control);
 
-      // Specialize the JSCallConstruct node to the {target_function}.
+      // Specialize the JSConstruct node to the {target_function}.
       NodeProperties::ReplaceValueInput(node, target_function, 0);
       NodeProperties::ReplaceEffectInput(node, effect);
       if (target == new_target) {
         NodeProperties::ReplaceValueInput(node, target_function, arity + 1);
       }
 
-      // Try to further reduce the JSCallConstruct {node}.
-      Reduction const reduction = ReduceJSCallConstruct(node);
+      // Try to further reduce the JSConstruct {node}.
+      Reduction const reduction = ReduceJSConstruct(node);
       return reduction.Changed() ? reduction : Changed(node);
     }
   }
@@ -503,10 +773,22 @@
   return NoChange();
 }
 
+Reduction JSCallReducer::ReduceJSConstructWithSpread(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstructWithSpread, node->opcode());
+  ConstructWithSpreadParameters const& p =
+      ConstructWithSpreadParametersOf(node->op());
+  DCHECK_LE(3u, p.arity());
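+  // For JSConstructWithSpread the new.target follows the spread, so the
+  // spread sits at index arity() - 2.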
+  int arity = static_cast<int>(p.arity() - 2);
+
+  return ReduceSpreadCall(node, arity);
+}
+
 Graph* JSCallReducer::graph() const { return jsgraph()->graph(); }
 
 Isolate* JSCallReducer::isolate() const { return jsgraph()->isolate(); }
 
+Factory* JSCallReducer::factory() const { return isolate()->factory(); }
+
 CommonOperatorBuilder* JSCallReducer::common() const {
   return jsgraph()->common();
 }
diff --git a/src/compiler/js-call-reducer.h b/src/compiler/js-call-reducer.h
index 81153f9..10b8ee8 100644
--- a/src/compiler/js-call-reducer.h
+++ b/src/compiler/js-call-reducer.h
@@ -10,6 +10,11 @@
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class CompilationDependencies;
+class Factory;
+
 namespace compiler {
 
 // Forward declarations.
@@ -18,48 +23,65 @@
 class JSOperatorBuilder;
 class SimplifiedOperatorBuilder;
 
-// Performs strength reduction on {JSCallConstruct} and {JSCallFunction} nodes,
+// Performs strength reduction on {JSConstruct} and {JSCall} nodes,
 // which might allow inlining or other optimizations to be performed afterwards.
 class JSCallReducer final : public AdvancedReducer {
  public:
   // Flags that control the mode of operation.
   enum Flag {
     kNoFlags = 0u,
-    kBailoutOnUninitialized = 1u << 0,
-    kDeoptimizationEnabled = 1u << 1
+    kDeoptimizationEnabled = 1u << 0,
   };
   typedef base::Flags<Flag> Flags;
 
   JSCallReducer(Editor* editor, JSGraph* jsgraph, Flags flags,
-                Handle<Context> native_context)
+                Handle<Context> native_context,
+                CompilationDependencies* dependencies)
       : AdvancedReducer(editor),
         jsgraph_(jsgraph),
         flags_(flags),
-        native_context_(native_context) {}
+        native_context_(native_context),
+        dependencies_(dependencies) {}
 
   Reduction Reduce(Node* node) final;
 
  private:
   Reduction ReduceArrayConstructor(Node* node);
+  Reduction ReduceCallApiFunction(
+      Node* node, Node* target,
+      Handle<FunctionTemplateInfo> function_template_info);
   Reduction ReduceNumberConstructor(Node* node);
   Reduction ReduceFunctionPrototypeApply(Node* node);
   Reduction ReduceFunctionPrototypeCall(Node* node);
+  Reduction ReduceFunctionPrototypeHasInstance(Node* node);
   Reduction ReduceObjectPrototypeGetProto(Node* node);
-  Reduction ReduceJSCallConstruct(Node* node);
-  Reduction ReduceJSCallFunction(Node* node);
+  Reduction ReduceSpreadCall(Node* node, int arity);
+  Reduction ReduceJSConstruct(Node* node);
+  Reduction ReduceJSConstructWithSpread(Node* node);
+  Reduction ReduceJSCall(Node* node);
+  Reduction ReduceJSCallWithSpread(Node* node);
+
+  enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+
+  HolderLookup LookupHolder(Handle<JSObject> object,
+                            Handle<FunctionTemplateInfo> function_template_info,
+                            Handle<JSObject>* holder);
 
   Graph* graph() const;
   Flags flags() const { return flags_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
+  Factory* factory() const;
   Handle<Context> native_context() const { return native_context_; }
   CommonOperatorBuilder* common() const;
   JSOperatorBuilder* javascript() const;
   SimplifiedOperatorBuilder* simplified() const;
+  CompilationDependencies* dependencies() const { return dependencies_; }
 
   JSGraph* const jsgraph_;
   Flags const flags_;
   Handle<Context> const native_context_;
+  CompilationDependencies* const dependencies_;
 };
 
 DEFINE_OPERATORS_FOR_FLAGS(JSCallReducer::Flags)
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
index e02fc49..9a2edc1 100644
--- a/src/compiler/js-context-specialization.cc
+++ b/src/compiler/js-context-specialization.cc
@@ -28,50 +28,81 @@
   return NoChange();
 }
 
+Reduction JSContextSpecialization::SimplifyJSLoadContext(Node* node,
+                                                         Node* new_context,
+                                                         size_t new_depth) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  const ContextAccess& access = ContextAccessOf(node->op());
+  DCHECK_LE(new_depth, access.depth());
 
-MaybeHandle<Context> JSContextSpecialization::GetSpecializationContext(
-    Node* node) {
-  DCHECK(node->opcode() == IrOpcode::kJSLoadContext ||
-         node->opcode() == IrOpcode::kJSStoreContext);
-  Node* const object = NodeProperties::GetValueInput(node, 0);
-  return NodeProperties::GetSpecializationContext(object, context());
+  if (new_depth == access.depth() &&
+      new_context == NodeProperties::GetContextInput(node)) {
+    return NoChange();
+  }
+
+  const Operator* op = jsgraph_->javascript()->LoadContext(
+      new_depth, access.index(), access.immutable());
+  NodeProperties::ReplaceContextInput(node, new_context);
+  NodeProperties::ChangeOp(node, op);
+  return Changed(node);
 }
 
+Reduction JSContextSpecialization::SimplifyJSStoreContext(Node* node,
+                                                          Node* new_context,
+                                                          size_t new_depth) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+  const ContextAccess& access = ContextAccessOf(node->op());
+  DCHECK_LE(new_depth, access.depth());
+
+  if (new_depth == access.depth() &&
+      new_context == NodeProperties::GetContextInput(node)) {
+    return NoChange();
+  }
+
+  const Operator* op =
+      jsgraph_->javascript()->StoreContext(new_depth, access.index());
+  NodeProperties::ReplaceContextInput(node, new_context);
+  NodeProperties::ChangeOp(node, op);
+  return Changed(node);
+}
 
 Reduction JSContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
 
-  // Get the specialization context from the node.
-  Handle<Context> context;
-  if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
-  // Find the right parent context.
   const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = access.depth(); i > 0; --i) {
-    context = handle(context->previous(), isolate());
+  size_t depth = access.depth();
+
+  // First walk up the context chain in the graph as far as possible.
+  Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+  Handle<Context> concrete;
+  if (!NodeProperties::GetSpecializationContext(outer, context())
+           .ToHandle(&concrete)) {
+    // We do not have a concrete context object, so we can only partially
+    // reduce the load by folding in the outer context node.
+    return SimplifyJSLoadContext(node, outer, depth);
   }
 
-  // If the access itself is mutable, only fold-in the parent.
-  if (!access.immutable()) {
-    // The access does not have to look up a parent, nothing to fold.
-    if (access.depth() == 0) {
-      return NoChange();
-    }
-    const Operator* op = jsgraph_->javascript()->LoadContext(
-        0, access.index(), access.immutable());
-    node->ReplaceInput(0, jsgraph_->Constant(context));
-    NodeProperties::ChangeOp(node, op);
-    return Changed(node);
+  // Now walk up the concrete context chain for the remaining depth.
+  for (; depth > 0; --depth) {
+    concrete = handle(concrete->previous(), isolate());
   }
-  Handle<Object> value =
-      handle(context->get(static_cast<int>(access.index())), isolate());
+
+  if (!access.immutable()) {
+    // We found the requested context object, but since the context slot is
+    // mutable, we can only partially reduce the load.
+    return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
+  }
 
   // Even though the context slot is immutable, the context might have escaped
   // before the function to which it belongs has initialized the slot.
-  // We must be conservative and check if the value in the slot is currently the
-  // hole or undefined. If it is neither of these, then it must be initialized.
+  // We must be conservative and check if the value in the slot is currently
+  // the hole or undefined. Only if it is neither of these can we be sure
+  // that it won't change anymore.
+  Handle<Object> value(concrete->get(static_cast<int>(access.index())),
+                       isolate());
   if (value->IsUndefined(isolate()) || value->IsTheHole(isolate())) {
-    return NoChange();
+    return SimplifyJSLoadContext(node, jsgraph()->Constant(concrete), depth);
   }
 
   // Success. The context load can be replaced with the constant.
@@ -86,24 +117,27 @@
 Reduction JSContextSpecialization::ReduceJSStoreContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
 
-  // Get the specialization context from the node.
-  Handle<Context> context;
-  if (!GetSpecializationContext(node).ToHandle(&context)) return NoChange();
-
-  // The access does not have to look up a parent, nothing to fold.
   const ContextAccess& access = ContextAccessOf(node->op());
-  if (access.depth() == 0) {
-    return NoChange();
+  size_t depth = access.depth();
+
+  // First walk up the context chain in the graph until we reduce the depth to 0
+  // or hit a node that does not have a CreateXYZContext operator.
+  Node* outer = NodeProperties::GetOuterContext(node, &depth);
+
+  Handle<Context> concrete;
+  if (!NodeProperties::GetSpecializationContext(outer, context())
+           .ToHandle(&concrete)) {
+    // We do not have a concrete context object, so we can only partially
+    // reduce the store by folding in the outer context node.
+    return SimplifyJSStoreContext(node, outer, depth);
   }
 
-  // Find the right parent context.
-  for (size_t i = access.depth(); i > 0; --i) {
-    context = handle(context->previous(), isolate());
+  // Now walk up the concrete context chain for the remaining depth.
+  for (; depth > 0; --depth) {
+    concrete = handle(concrete->previous(), isolate());
   }
 
-  node->ReplaceInput(0, jsgraph_->Constant(context));
-  NodeProperties::ChangeOp(node, javascript()->StoreContext(0, access.index()));
-  return Changed(node);
+  return SimplifyJSStoreContext(node, jsgraph()->Constant(concrete), depth);
 }
 
 
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
index ef784fc..99172af 100644
--- a/src/compiler/js-context-specialization.h
+++ b/src/compiler/js-context-specialization.h
@@ -30,8 +30,10 @@
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
 
-  // Returns the {Context} to specialize {node} to (if any).
-  MaybeHandle<Context> GetSpecializationContext(Node* node);
+  Reduction SimplifyJSStoreContext(Node* node, Node* new_context,
+                                   size_t new_depth);
+  Reduction SimplifyJSLoadContext(Node* node, Node* new_context,
+                                  size_t new_depth);
 
   Isolate* isolate() const;
   JSOperatorBuilder* javascript() const;
diff --git a/src/compiler/js-create-lowering.cc b/src/compiler/js-create-lowering.cc
index c54b76b..f3ceb2b 100644
--- a/src/compiler/js-create-lowering.cc
+++ b/src/compiler/js-create-lowering.cc
@@ -12,11 +12,12 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/state-values-utils.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -38,6 +39,7 @@
   // Primitive allocation of static size.
   void Allocate(int size, PretenureFlag pretenure = NOT_TENURED,
                 Type* type = Type::Any()) {
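+    // Inline allocation only supports objects up to the regular heap object
+    // size limit.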
+    DCHECK_LE(size, kMaxRegularHeapObjectSize);
     effect_ = graph()->NewNode(
         common()->BeginRegion(RegionObservability::kNotObservable), effect_);
     allocation_ =
@@ -161,7 +163,9 @@
           }
         }
       }
-    } else if (!boilerplate->HasFastDoubleElements()) {
+    } else if (boilerplate->HasFastDoubleElements()) {
+      if (elements->Size() > kMaxRegularHeapObjectSize) return false;
+    } else {
       return false;
     }
   }
@@ -176,7 +180,8 @@
   int limit = boilerplate->map()->NumberOfOwnDescriptors();
   for (int i = 0; i < limit; i++) {
     PropertyDetails details = descriptors->GetDetails(i);
-    if (details.type() != DATA) continue;
+    if (details.location() != kField) continue;
+    DCHECK_EQ(kData, details.kind());
     if ((*max_properties)-- == 0) return false;
     FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
     if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
@@ -206,8 +211,6 @@
       return ReduceJSCreateArguments(node);
     case IrOpcode::kJSCreateArray:
       return ReduceJSCreateArray(node);
-    case IrOpcode::kJSCreateClosure:
-      return ReduceJSCreateClosure(node);
     case IrOpcode::kJSCreateIterResultObject:
       return ReduceJSCreateIterResultObject(node);
     case IrOpcode::kJSCreateKeyValueArray:
@@ -236,6 +239,7 @@
   Node* const new_target = NodeProperties::GetValueInput(node, 1);
   Type* const new_target_type = NodeProperties::GetType(new_target);
   Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
   // Extract constructor and original constructor function.
   if (target_type->IsHeapConstant() && new_target_type->IsHeapConstant() &&
       new_target_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -263,7 +267,7 @@
 
       // Emit code to allocate the JSObject instance for the
       // {original_constructor}.
-      AllocationBuilder a(jsgraph(), effect, graph()->start());
+      AllocationBuilder a(jsgraph(), effect, control);
       a.Allocate(instance_size);
       a.Store(AccessBuilder::ForMap(), initial_map);
       a.Store(AccessBuilder::ForJSObjectProperties(),
@@ -274,6 +278,7 @@
         a.Store(AccessBuilder::ForJSObjectInObjectProperty(initial_map, i),
                 jsgraph()->UndefinedConstant());
       }
+      RelaxControls(node);
       a.FinishAndChange(node);
       return Changed(node);
     }
@@ -294,46 +299,130 @@
   if (outer_state->opcode() != IrOpcode::kFrameState) {
     switch (type) {
       case CreateArgumentsType::kMappedArguments: {
-        // TODO(mstarzinger): Duplicate parameters are not handled yet.
+        // TODO(bmeurer): Make deoptimization mandatory for the various
+        // arguments objects, so that we always have a shared_info here.
         Handle<SharedFunctionInfo> shared_info;
-        if (!state_info.shared_info().ToHandle(&shared_info) ||
-            shared_info->has_duplicate_parameters()) {
-          return NoChange();
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          // TODO(mstarzinger): Duplicate parameters are not handled yet.
+          if (shared_info->has_duplicate_parameters()) return NoChange();
+          // If there is no aliasing, the arguments object elements are not
+          // special in any way, so we can just return an unmapped backing
+          // store.
+          if (shared_info->internal_formal_parameter_count() == 0) {
+            Node* const callee = NodeProperties::GetValueInput(node, 0);
+            Node* effect = NodeProperties::GetEffectInput(node);
+            // Allocate the elements backing store.
+            Node* const elements = effect = graph()->NewNode(
+                simplified()->NewUnmappedArgumentsElements(0), effect);
+            Node* const length = effect = graph()->NewNode(
+                simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+                elements, effect, control);
+            // Load the arguments object map.
+            Node* const arguments_map = jsgraph()->HeapConstant(
+                handle(native_context()->sloppy_arguments_map(), isolate()));
+            // Actually allocate and initialize the arguments object.
+            AllocationBuilder a(jsgraph(), effect, control);
+            Node* properties = jsgraph()->EmptyFixedArrayConstant();
+            STATIC_ASSERT(JSSloppyArgumentsObject::kSize == 5 * kPointerSize);
+            a.Allocate(JSSloppyArgumentsObject::kSize);
+            a.Store(AccessBuilder::ForMap(), arguments_map);
+            a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+            a.Store(AccessBuilder::ForJSObjectElements(), elements);
+            a.Store(AccessBuilder::ForArgumentsLength(), length);
+            a.Store(AccessBuilder::ForArgumentsCallee(), callee);
+            RelaxControls(node);
+            a.FinishAndChange(node);
+          } else {
+            Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
+            Operator::Properties properties = node->op()->properties();
+            CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+                isolate(), graph()->zone(), callable.descriptor(), 0,
+                CallDescriptor::kNoFlags, properties);
+            const Operator* new_op = common()->Call(desc);
+            Node* stub_code = jsgraph()->HeapConstant(callable.code());
+            node->InsertInput(graph()->zone(), 0, stub_code);
+            node->RemoveInput(3);  // Remove the frame state.
+            NodeProperties::ChangeOp(node, new_op);
+          }
+          return Changed(node);
         }
-        Callable callable = CodeFactory::FastNewSloppyArguments(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNoFlags, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        node->RemoveInput(3);  // Remove the frame state.
-        NodeProperties::ChangeOp(node, new_op);
-        return Changed(node);
+        return NoChange();
       }
       case CreateArgumentsType::kUnmappedArguments: {
-        Callable callable = CodeFactory::FastNewStrictArguments(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        NodeProperties::ChangeOp(node, new_op);
+        Handle<SharedFunctionInfo> shared_info;
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          Node* effect = NodeProperties::GetEffectInput(node);
+          // Allocate the elements backing store.
+          Node* const elements = effect = graph()->NewNode(
+              simplified()->NewUnmappedArgumentsElements(
+                  shared_info->internal_formal_parameter_count()),
+              effect);
+          Node* const length = effect = graph()->NewNode(
+              simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+              elements, effect, control);
+          // Load the arguments object map.
+          Node* const arguments_map = jsgraph()->HeapConstant(
+              handle(native_context()->strict_arguments_map(), isolate()));
+          // Actually allocate and initialize the arguments object.
+          AllocationBuilder a(jsgraph(), effect, control);
+          Node* properties = jsgraph()->EmptyFixedArrayConstant();
+          STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize);
+          a.Allocate(JSStrictArgumentsObject::kSize);
+          a.Store(AccessBuilder::ForMap(), arguments_map);
+          a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+          a.Store(AccessBuilder::ForJSObjectElements(), elements);
+          a.Store(AccessBuilder::ForArgumentsLength(), length);
+          RelaxControls(node);
+          a.FinishAndChange(node);
+        } else {
+          Callable callable = CodeFactory::FastNewStrictArguments(isolate());
+          Operator::Properties properties = node->op()->properties();
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              isolate(), graph()->zone(), callable.descriptor(), 0,
+              CallDescriptor::kNeedsFrameState, properties);
+          const Operator* new_op = common()->Call(desc);
+          Node* stub_code = jsgraph()->HeapConstant(callable.code());
+          node->InsertInput(graph()->zone(), 0, stub_code);
+          NodeProperties::ChangeOp(node, new_op);
+        }
         return Changed(node);
       }
       case CreateArgumentsType::kRestParameter: {
-        Callable callable = CodeFactory::FastNewRestParameter(isolate());
-        Operator::Properties properties = node->op()->properties();
-        CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-            isolate(), graph()->zone(), callable.descriptor(), 0,
-            CallDescriptor::kNeedsFrameState, properties);
-        const Operator* new_op = common()->Call(desc);
-        Node* stub_code = jsgraph()->HeapConstant(callable.code());
-        node->InsertInput(graph()->zone(), 0, stub_code);
-        NodeProperties::ChangeOp(node, new_op);
+        Handle<SharedFunctionInfo> shared_info;
+        if (state_info.shared_info().ToHandle(&shared_info)) {
+          Node* effect = NodeProperties::GetEffectInput(node);
+          // Allocate the elements backing store.
+          Node* const elements = effect = graph()->NewNode(
+              simplified()->NewRestParameterElements(
+                  shared_info->internal_formal_parameter_count()),
+              effect);
+          Node* const length = effect = graph()->NewNode(
+              simplified()->LoadField(AccessBuilder::ForFixedArrayLength()),
+              elements, effect, control);
+          // Load the JSArray object map.
+          Node* const jsarray_map = jsgraph()->HeapConstant(handle(
+              native_context()->js_array_fast_elements_map_index(), isolate()));
+          // Actually allocate and initialize the jsarray.
+          AllocationBuilder a(jsgraph(), effect, control);
+          Node* properties = jsgraph()->EmptyFixedArrayConstant();
+          STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize);
+          a.Allocate(JSArray::kSize);
+          a.Store(AccessBuilder::ForMap(), jsarray_map);
+          a.Store(AccessBuilder::ForJSObjectProperties(), properties);
+          a.Store(AccessBuilder::ForJSObjectElements(), elements);
+          a.Store(AccessBuilder::ForJSArrayLength(FAST_ELEMENTS), length);
+          RelaxControls(node);
+          a.FinishAndChange(node);
+        } else {
+          Callable callable = CodeFactory::FastNewRestParameter(isolate());
+          Operator::Properties properties = node->op()->properties();
+          CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+              isolate(), graph()->zone(), callable.descriptor(), 0,
+              CallDescriptor::kNeedsFrameState, properties);
+          const Operator* new_op = common()->Call(desc);
+          Node* stub_code = jsgraph()->HeapConstant(callable.code());
+          node->InsertInput(graph()->zone(), 0, stub_code);
+          NodeProperties::ChangeOp(node, new_op);
+        }
         return Changed(node);
       }
     }
@@ -662,43 +751,6 @@
   return ReduceNewArrayToStubCall(node, site);
 }
 
-Reduction JSCreateLowering::ReduceJSCreateClosure(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCreateClosure, node->opcode());
-  CreateClosureParameters const& p = CreateClosureParametersOf(node->op());
-  Handle<SharedFunctionInfo> shared = p.shared_info();
-
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-  Node* context = NodeProperties::GetContextInput(node);
-  int const function_map_index =
-      Context::FunctionMapIndex(shared->language_mode(), shared->kind());
-  Node* function_map = jsgraph()->HeapConstant(
-      handle(Map::cast(native_context()->get(function_map_index)), isolate()));
-  // Note that it is only safe to embed the raw entry point of the compile
-  // lazy stub into the code, because that stub is immortal and immovable.
-  Node* compile_entry = jsgraph()->PointerConstant(
-      jsgraph()->isolate()->builtins()->CompileLazy()->entry());
-  Node* empty_fixed_array = jsgraph()->EmptyFixedArrayConstant();
-  Node* empty_literals_array = jsgraph()->EmptyLiteralsArrayConstant();
-  Node* the_hole = jsgraph()->TheHoleConstant();
-  Node* undefined = jsgraph()->UndefinedConstant();
-  AllocationBuilder a(jsgraph(), effect, control);
-  STATIC_ASSERT(JSFunction::kSize == 9 * kPointerSize);
-  a.Allocate(JSFunction::kSize, p.pretenure());
-  a.Store(AccessBuilder::ForMap(), function_map);
-  a.Store(AccessBuilder::ForJSObjectProperties(), empty_fixed_array);
-  a.Store(AccessBuilder::ForJSObjectElements(), empty_fixed_array);
-  a.Store(AccessBuilder::ForJSFunctionLiterals(), empty_literals_array);
-  a.Store(AccessBuilder::ForJSFunctionPrototypeOrInitialMap(), the_hole);
-  a.Store(AccessBuilder::ForJSFunctionSharedFunctionInfo(), shared);
-  a.Store(AccessBuilder::ForJSFunctionContext(), context);
-  a.Store(AccessBuilder::ForJSFunctionCodeEntry(), compile_entry);
-  a.Store(AccessBuilder::ForJSFunctionNextFunctionLink(), undefined);
-  RelaxControls(node);
-  a.FinishAndChange(node);
-  return Changed(node);
-}
-
 Reduction JSCreateLowering::ReduceJSCreateIterResultObject(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateIterResultObject, node->opcode());
   Node* value = NodeProperties::GetValueInput(node, 0);
@@ -760,9 +812,10 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
-  Handle<LiteralsArray> literals_array;
-  if (GetSpecializationLiterals(node).ToHandle(&literals_array)) {
-    Handle<Object> literal(literals_array->literal(p.index()), isolate());
+  Handle<FeedbackVector> feedback_vector;
+  if (GetSpecializationFeedbackVector(node).ToHandle(&feedback_vector)) {
+    FeedbackSlot slot(FeedbackVector::ToSlot(p.index()));
+    Handle<Object> literal(feedback_vector->Get(slot), isolate());
     if (literal->IsAllocationSite()) {
       Handle<AllocationSite> site = Handle<AllocationSite>::cast(literal);
       Handle<JSObject> boilerplate(JSObject::cast(site->transition_info()),
@@ -785,7 +838,10 @@
 
 Reduction JSCreateLowering::ReduceJSCreateFunctionContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, node->opcode());
-  int slot_count = OpParameter<int>(node->op());
+  const CreateFunctionContextParameters& parameters =
+      CreateFunctionContextParametersOf(node->op());
+  int slot_count = parameters.slot_count();
+  ScopeType scope_type = parameters.scope_type();
   Node* const closure = NodeProperties::GetValueInput(node, 0);
 
   // Use inline allocation for function contexts up to a size limit.
@@ -798,7 +854,18 @@
     AllocationBuilder a(jsgraph(), effect, control);
     STATIC_ASSERT(Context::MIN_CONTEXT_SLOTS == 4);  // Ensure fully covered.
     int context_length = slot_count + Context::MIN_CONTEXT_SLOTS;
-    a.AllocateArray(context_length, factory()->function_context_map());
+    Handle<Map> map;
+    switch (scope_type) {
+      case EVAL_SCOPE:
+        map = factory()->eval_context_map();
+        break;
+      case FUNCTION_SCOPE:
+        map = factory()->function_context_map();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    a.AllocateArray(context_length, map);
     a.Store(AccessBuilder::ForContextSlot(Context::CLOSURE_INDEX), closure);
     a.Store(AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX), context);
     a.Store(AccessBuilder::ForContextSlot(Context::EXTENSION_INDEX), extension);
@@ -929,6 +996,7 @@
   AllocationBuilder a(jsgraph(), effect, control);
   a.AllocateArray(argument_count, factory()->fixed_array_map());
   for (int i = 0; i < argument_count; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   return a.Finish();
@@ -958,6 +1026,7 @@
   AllocationBuilder a(jsgraph(), effect, control);
   a.AllocateArray(num_elements, factory()->fixed_array_map());
   for (int i = 0; i < num_elements; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
     a.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   return a.Finish();
@@ -987,18 +1056,19 @@
   // Prepare an iterator over argument values recorded in the frame state.
   Node* const parameters = frame_state->InputAt(kFrameStateParametersInput);
   StateValuesAccess parameters_access(parameters);
-  auto paratemers_it = ++parameters_access.begin();
+  auto parameters_it = ++parameters_access.begin();
 
   // The unmapped argument values recorded in the frame state are stored yet
   // another indirection away and then linked into the parameter map below,
   // whereas mapped argument values are replaced with a hole instead.
   AllocationBuilder aa(jsgraph(), effect, control);
   aa.AllocateArray(argument_count, factory()->fixed_array_map());
-  for (int i = 0; i < mapped_count; ++i, ++paratemers_it) {
+  for (int i = 0; i < mapped_count; ++i, ++parameters_it) {
     aa.Store(AccessBuilder::ForFixedArraySlot(i), jsgraph()->TheHoleConstant());
   }
-  for (int i = mapped_count; i < argument_count; ++i, ++paratemers_it) {
-    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*paratemers_it).node);
+  for (int i = mapped_count; i < argument_count; ++i, ++parameters_it) {
+    DCHECK_NOT_NULL((*parameters_it).node);
+    aa.Store(AccessBuilder::ForFixedArraySlot(i), (*parameters_it).node);
   }
   Node* arguments = aa.Finish();
 
@@ -1081,13 +1151,15 @@
   for (int i = 0; i < boilerplate_nof; ++i) {
     PropertyDetails const property_details =
         boilerplate_map->instance_descriptors()->GetDetails(i);
-    if (property_details.type() != DATA) continue;
+    if (property_details.location() != kField) continue;
+    DCHECK_EQ(kData, property_details.kind());
     Handle<Name> property_name(
         boilerplate_map->instance_descriptors()->GetKey(i), isolate());
     FieldIndex index = FieldIndex::ForDescriptor(*boilerplate_map, i);
-    FieldAccess access = {
-        kTaggedBase, index.offset(),           property_name,
-        Type::Any(), MachineType::AnyTagged(), kFullWriteBarrier};
+    FieldAccess access = {kTaggedBase,      index.offset(),
+                          property_name,    MaybeHandle<Map>(),
+                          Type::Any(),      MachineType::AnyTagged(),
+                          kFullWriteBarrier};
     Node* value;
     if (boilerplate->IsUnboxedDoubleField(index)) {
       access.machine_type = MachineType::Float64();
@@ -1104,23 +1176,15 @@
                                              boilerplate_object, site_context);
         site_context->ExitScope(current_site, boilerplate_object);
       } else if (property_details.representation().IsDouble()) {
+        double number = Handle<HeapNumber>::cast(boilerplate_value)->value();
         // Allocate a mutable HeapNumber box and store the value into it.
-        effect = graph()->NewNode(
-            common()->BeginRegion(RegionObservability::kNotObservable), effect);
-        value = effect = graph()->NewNode(
-            simplified()->Allocate(pretenure),
-            jsgraph()->Constant(HeapNumber::kSize), effect, control);
-        effect = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForMap()), value,
-            jsgraph()->HeapConstant(factory()->mutable_heap_number_map()),
-            effect, control);
-        effect = graph()->NewNode(
-            simplified()->StoreField(AccessBuilder::ForHeapNumberValue()),
-            value, jsgraph()->Constant(
-                       Handle<HeapNumber>::cast(boilerplate_value)->value()),
-            effect, control);
-        value = effect =
-            graph()->NewNode(common()->FinishRegion(), value, effect);
+        AllocationBuilder builder(jsgraph(), effect, control);
+        builder.Allocate(HeapNumber::kSize, pretenure);
+        builder.Store(AccessBuilder::ForMap(),
+                      factory()->mutable_heap_number_map());
+        builder.Store(AccessBuilder::ForHeapNumberValue(),
+                      jsgraph()->Constant(number));
+        value = effect = builder.Finish();
       } else if (property_details.representation().IsSmi()) {
         // Ensure that value is stored as smi.
         value = boilerplate_value->IsUninitialized(isolate())
@@ -1156,7 +1220,7 @@
         AccessBuilder::ForJSArrayLength(boilerplate_array->GetElementsKind()),
         handle(boilerplate_array->length(), isolate()));
   }
-  for (auto const inobject_field : inobject_fields) {
+  for (auto const& inobject_field : inobject_fields) {
     builder.Store(inobject_field.first, inobject_field.second);
   }
   return builder.Finish();
@@ -1242,13 +1306,13 @@
   return builder.Finish();
 }
 
-MaybeHandle<LiteralsArray> JSCreateLowering::GetSpecializationLiterals(
+MaybeHandle<FeedbackVector> JSCreateLowering::GetSpecializationFeedbackVector(
     Node* node) {
   Node* const closure = NodeProperties::GetValueInput(node, 0);
   switch (closure->opcode()) {
     case IrOpcode::kHeapConstant: {
       Handle<HeapObject> object = OpParameter<Handle<HeapObject>>(closure);
-      return handle(Handle<JSFunction>::cast(object)->literals());
+      return handle(Handle<JSFunction>::cast(object)->feedback_vector());
     }
     case IrOpcode::kParameter: {
       int const index = ParameterIndexOf(closure->op());
@@ -1256,14 +1320,14 @@
       // {Parameter} indices start at -1, so value outputs of {Start} look like
       // this: closure, receiver, param0, ..., paramN, context.
       if (index == -1) {
-        return literals_array_;
+        return feedback_vector_;
       }
       break;
     }
     default:
       break;
   }
-  return MaybeHandle<LiteralsArray>();
+  return MaybeHandle<FeedbackVector>();
 }
 
 Factory* JSCreateLowering::factory() const { return isolate()->factory(); }
diff --git a/src/compiler/js-create-lowering.h b/src/compiler/js-create-lowering.h
index b5390f1..eea75d3 100644
--- a/src/compiler/js-create-lowering.h
+++ b/src/compiler/js-create-lowering.h
@@ -33,12 +33,13 @@
     : public NON_EXPORTED_BASE(AdvancedReducer) {
  public:
   JSCreateLowering(Editor* editor, CompilationDependencies* dependencies,
-                   JSGraph* jsgraph, MaybeHandle<LiteralsArray> literals_array,
+                   JSGraph* jsgraph,
+                   MaybeHandle<FeedbackVector> feedback_vector,
                    Handle<Context> native_context, Zone* zone)
       : AdvancedReducer(editor),
         dependencies_(dependencies),
         jsgraph_(jsgraph),
-        literals_array_(literals_array),
+        feedback_vector_(feedback_vector),
         native_context_(native_context),
         zone_(zone) {}
   ~JSCreateLowering() final {}
@@ -49,7 +50,6 @@
   Reduction ReduceJSCreate(Node* node);
   Reduction ReduceJSCreateArguments(Node* node);
   Reduction ReduceJSCreateArray(Node* node);
-  Reduction ReduceJSCreateClosure(Node* node);
   Reduction ReduceJSCreateIterResultObject(Node* node);
   Reduction ReduceJSCreateKeyValueArray(Node* node);
   Reduction ReduceJSCreateLiteral(Node* node);
@@ -79,8 +79,8 @@
 
   Reduction ReduceNewArrayToStubCall(Node* node, Handle<AllocationSite> site);
 
-  // Infers the LiteralsArray to use for a given {node}.
-  MaybeHandle<LiteralsArray> GetSpecializationLiterals(Node* node);
+  // Infers the FeedbackVector to use for a given {node}.
+  MaybeHandle<FeedbackVector> GetSpecializationFeedbackVector(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -96,7 +96,7 @@
 
   CompilationDependencies* const dependencies_;
   JSGraph* const jsgraph_;
-  MaybeHandle<LiteralsArray> const literals_array_;
+  MaybeHandle<FeedbackVector> const feedback_vector_;
   Handle<Context> const native_context_;
   Zone* const zone_;
 };
diff --git a/src/compiler/js-frame-specialization.cc b/src/compiler/js-frame-specialization.cc
index 55ec1bf..73e1b7d 100644
--- a/src/compiler/js-frame-specialization.cc
+++ b/src/compiler/js-frame-specialization.cc
@@ -27,6 +27,9 @@
 }
 
 Reduction JSFrameSpecialization::ReduceOsrValue(Node* node) {
+  // JSFrameSpecialization should never run on interpreted frames, since the
+  // code below assumes standard stack frame layouts.
+  DCHECK(!frame()->is_interpreted());
   DCHECK_EQ(IrOpcode::kOsrValue, node->opcode());
   Handle<Object> value;
   int index = OsrValueIndexOf(node->op());
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index 250a9c2..79a3377 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -5,6 +5,7 @@
 #include "src/compiler/js-generic-lowering.h"
 
 #include "src/ast/ast.h"
+#include "src/builtins/builtins-constructor.h"
 #include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/compiler/common-operator.h"
@@ -13,6 +14,7 @@
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -87,11 +89,12 @@
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags,
-                                            Operator::Properties properties) {
+                                            Operator::Properties properties,
+                                            int result_size) {
   const CallInterfaceDescriptor& descriptor = callable.descriptor();
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       isolate(), zone(), descriptor, descriptor.GetStackParameterCount(), flags,
-      properties);
+      properties, MachineType::AnyTagged(), result_size);
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
   node->InsertInput(zone(), 0, stub_code);
   NodeProperties::ChangeOp(node, common()->Call(desc));
@@ -142,6 +145,15 @@
                       Operator::kEliminatable);
 }
 
+void JSGenericLowering::LowerJSClassOf(Node* node) {
+  // The %_ClassOf intrinsic doesn't need the current context.
+  NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
+  Callable callable = CodeFactory::ClassOf(isolate());
+  node->AppendInput(zone(), graph()->start());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoAllocate,
+                      Operator::kEliminatable);
+}
+
 void JSGenericLowering::LowerJSTypeOf(Node* node) {
   // The typeof operator doesn't need the current context.
   NodeProperties::ReplaceContextInput(node, jsgraph()->NoContextConstant());
@@ -153,75 +165,37 @@
 
 
 void JSGenericLowering::LowerJSLoadProperty(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 2);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const PropertyAccess& p = PropertyAccessOf(node->op());
   Callable callable = CodeFactory::KeyedLoadICInOptimizedCode(isolate());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
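+  // The feedback vector is now embedded as a compile-time constant instead of
+  // being loaded from the closure at runtime.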
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(3, vector);
-  node->ReplaceInput(6, effect);
+  node->InsertInput(zone(), 3, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSLoadNamed(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 1);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable = CodeFactory::LoadICInOptimizedCode(isolate());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   node->InsertInput(zone(), 1, jsgraph()->HeapConstant(p.name()));
   node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(3, vector);
-  node->ReplaceInput(6, effect);
+  node->InsertInput(zone(), 3, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
 
 void JSGenericLowering::LowerJSLoadGlobal(Node* node) {
-  Node* closure = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   const LoadGlobalParameters& p = LoadGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::LoadGlobalICInOptimizedCode(isolate(), p.typeof_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
-  node->InsertInput(zone(), 0, jsgraph()->SmiConstant(p.feedback().index()));
-  node->ReplaceInput(1, vector);
-  node->ReplaceInput(4, effect);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(p.name()));
+  node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.feedback().index()));
+  node->InsertInput(zone(), 2, vector);
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -230,33 +204,19 @@
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* value = NodeProperties::GetValueInput(node, 2);
-  Node* closure = NodeProperties::GetValueInput(node, 3);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   PropertyAccess const& p = PropertyAccessOf(node->op());
-  LanguageMode language_mode = p.language_mode();
   Callable callable =
-      CodeFactory::KeyedStoreICInOptimizedCode(isolate(), language_mode);
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+      CodeFactory::KeyedStoreICInOptimizedCode(isolate(), p.language_mode());
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 1);
+  node->InsertInputs(zone(), 0, 2);
   node->ReplaceInput(Descriptor::kReceiver, receiver);
   node->ReplaceInput(Descriptor::kName, key);
   node->ReplaceInput(Descriptor::kValue, value);
   node->ReplaceInput(Descriptor::kSlot,
                      jsgraph()->SmiConstant(p.feedback().index()));
   node->ReplaceInput(Descriptor::kVector, vector);
-  node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
@@ -264,39 +224,42 @@
 void JSGenericLowering::LowerJSStoreNamed(Node* node) {
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* value = NodeProperties::GetValueInput(node, 1);
-  Node* closure = NodeProperties::GetValueInput(node, 2);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   NamedAccess const& p = NamedAccessOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 2);
+  node->InsertInputs(zone(), 0, 3);
   node->ReplaceInput(Descriptor::kReceiver, receiver);
   node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
   node->ReplaceInput(Descriptor::kValue, value);
   node->ReplaceInput(Descriptor::kSlot,
                      jsgraph()->SmiConstant(p.feedback().index()));
   node->ReplaceInput(Descriptor::kVector, vector);
-  node->ReplaceInput(7, effect);
   ReplaceWithStubCall(node, callable, flags);
 }
 
+void JSGenericLowering::LowerJSStoreNamedOwn(Node* node) {
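+  // This mirrors LowerJSStoreNamed above, but calls the StoreOwnIC stub,
+  // which defines an own data property on the receiver.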
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 1);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+  Callable callable = CodeFactory::StoreOwnICInOptimizedCode(isolate());
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
+  typedef StoreWithVectorDescriptor Descriptor;
+  node->InsertInputs(zone(), 0, 3);
+  node->ReplaceInput(Descriptor::kReceiver, receiver);
+  node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
+  node->ReplaceInput(Descriptor::kValue, value);
+  node->ReplaceInput(Descriptor::kSlot,
+                     jsgraph()->SmiConstant(p.feedback().index()));
+  node->ReplaceInput(Descriptor::kVector, vector);
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 void JSGenericLowering::LowerJSStoreGlobal(Node* node) {
   Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* closure = NodeProperties::GetValueInput(node, 1);
   Node* context = NodeProperties::GetContextInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -304,16 +267,7 @@
   const StoreGlobalParameters& p = StoreGlobalParametersOf(node->op());
   Callable callable =
       CodeFactory::StoreICInOptimizedCode(isolate(), p.language_mode());
-  // Load the type feedback vector from the closure.
-  Node* literals = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), closure,
-      jsgraph()->IntPtrConstant(JSFunction::kLiteralsOffset - kHeapObjectTag),
-      effect, control);
-  Node* vector = effect = graph()->NewNode(
-      machine()->Load(MachineType::AnyTagged()), literals,
-      jsgraph()->IntPtrConstant(LiteralsArray::kFeedbackVectorOffset -
-                                kHeapObjectTag),
-      effect, control);
+  Node* vector = jsgraph()->HeapConstant(p.feedback().vector());
   // Load global object from the context.
   Node* native_context = effect =
       graph()->NewNode(machine()->Load(MachineType::AnyTagged()), context,
@@ -325,7 +279,7 @@
       jsgraph()->IntPtrConstant(Context::SlotOffset(Context::EXTENSION_INDEX)),
       effect, control);
   typedef StoreWithVectorDescriptor Descriptor;
-  node->InsertInputs(zone(), 0, 3);
+  node->InsertInputs(zone(), 0, 4);
   node->ReplaceInput(Descriptor::kReceiver, global);
   node->ReplaceInput(Descriptor::kName, jsgraph()->HeapConstant(p.name()));
   node->ReplaceInput(Descriptor::kValue, value);
@@ -336,6 +290,13 @@
   ReplaceWithStubCall(node, callable, flags);
 }
 
+void JSGenericLowering::LowerJSStoreDataPropertyInLiteral(Node* node) {
+  DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
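+  // Insert the feedback vector and slot index as two additional value inputs
+  // for the runtime call.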
+  node->InsertInputs(zone(), 4, 2);
+  node->ReplaceInput(4, jsgraph()->HeapConstant(p.feedback().vector()));
+  node->ReplaceInput(5, jsgraph()->SmiConstant(p.feedback().index()));
+  ReplaceWithRuntimeCall(node, Runtime::kDefineDataPropertyInLiteral);
+}
 
 void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
   LanguageMode language_mode = OpParameter<LanguageMode>(node);
@@ -344,6 +305,11 @@
                                    : Runtime::kDeleteProperty_Sloppy);
 }
 
+void JSGenericLowering::LowerJSGetSuperConstructor(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::GetSuperConstructor(isolate());
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 void JSGenericLowering::LowerJSInstanceOf(Node* node) {
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
@@ -358,40 +324,12 @@
 }
 
 void JSGenericLowering::LowerJSLoadContext(Node* node) {
-  const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = 0; i < access.depth(); ++i) {
-    node->ReplaceInput(
-        0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
-                            NodeProperties::GetValueInput(node, 0),
-                            jsgraph()->Int32Constant(
-                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
-                            NodeProperties::GetEffectInput(node),
-                            graph()->start()));
-  }
-  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
-                            static_cast<int>(access.index()))));
-  node->AppendInput(zone(), graph()->start());
-  NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
 void JSGenericLowering::LowerJSStoreContext(Node* node) {
-  const ContextAccess& access = ContextAccessOf(node->op());
-  for (size_t i = 0; i < access.depth(); ++i) {
-    node->ReplaceInput(
-        0, graph()->NewNode(machine()->Load(MachineType::AnyTagged()),
-                            NodeProperties::GetValueInput(node, 0),
-                            jsgraph()->Int32Constant(
-                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
-                            NodeProperties::GetEffectInput(node),
-                            graph()->start()));
-  }
-  node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
-  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
-                            static_cast<int>(access.index()))));
-  NodeProperties::ChangeOp(
-      node, machine()->Store(StoreRepresentation(MachineRepresentation::kTagged,
-                                                 kFullWriteBarrier)));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
@@ -438,11 +376,18 @@
   Handle<SharedFunctionInfo> const shared_info = p.shared_info();
   node->InsertInput(zone(), 0, jsgraph()->HeapConstant(shared_info));
 
-  // Use the FastNewClosureStub only for functions allocated in new space.
+  // Use the FastNewClosure builtin only for functions allocated in new
+  // space.
   if (p.pretenure() == NOT_TENURED) {
     Callable callable = CodeFactory::FastNewClosure(isolate());
+    node->InsertInput(zone(), 1,
+                      jsgraph()->HeapConstant(p.feedback().vector()));
+    node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
     ReplaceWithStubCall(node, callable, flags);
   } else {
+    node->InsertInput(zone(), 1,
+                      jsgraph()->HeapConstant(p.feedback().vector()));
+    node->InsertInput(zone(), 2, jsgraph()->SmiConstant(p.feedback().index()));
     ReplaceWithRuntimeCall(node, (p.pretenure() == TENURED)
                                      ? Runtime::kNewClosure_Tenured
                                      : Runtime::kNewClosure);
@@ -451,14 +396,20 @@
 
 
 void JSGenericLowering::LowerJSCreateFunctionContext(Node* node) {
-  int const slot_count = OpParameter<int>(node->op());
+  const CreateFunctionContextParameters& parameters =
+      CreateFunctionContextParametersOf(node->op());
+  int slot_count = parameters.slot_count();
+  ScopeType scope_type = parameters.scope_type();
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
 
-  if (slot_count <= FastNewFunctionContextStub::kMaximumSlots) {
-    Callable callable = CodeFactory::FastNewFunctionContext(isolate());
+  if (slot_count <=
+      ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
+    Callable callable =
+        CodeFactory::FastNewFunctionContext(isolate(), scope_type);
     node->InsertInput(zone(), 1, jsgraph()->Int32Constant(slot_count));
     ReplaceWithStubCall(node, callable, flags);
   } else {
+    node->InsertInput(zone(), 1, jsgraph()->SmiConstant(scope_type));
     ReplaceWithRuntimeCall(node, Runtime::kNewFunctionContext);
   }
 }
@@ -478,11 +429,13 @@
   node->InsertInput(zone(), 1, jsgraph()->SmiConstant(p.index()));
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
 
-  // Use the FastCloneShallowArrayStub only for shallow boilerplates up to the
-  // initial length limit for arrays with "fast" elements kind.
+  // Use the FastCloneShallowArray builtin only for shallow boilerplates without
+  // properties up to the number of elements that the stubs can handle.
   if ((p.flags() & ArrayLiteral::kShallowElements) != 0 &&
-      p.length() < JSArray::kInitialMaxFastElementArray) {
-    Callable callable = CodeFactory::FastCloneShallowArray(isolate());
+      p.length() <
+          ConstructorBuiltinsAssembler::kMaximumClonedShallowArrayElements) {
+    Callable callable = CodeFactory::FastCloneShallowArray(
+        isolate(), DONT_TRACK_ALLOCATION_SITE);
     ReplaceWithStubCall(node, callable, flags);
   } else {
     node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
@@ -498,10 +451,11 @@
   node->InsertInput(zone(), 2, jsgraph()->HeapConstant(p.constant()));
   node->InsertInput(zone(), 3, jsgraph()->SmiConstant(p.flags()));
 
-  // Use the FastCloneShallowObjectStub only for shallow boilerplates without
-  // elements up to the number of properties that the stubs can handle.
+  // Use the FastCloneShallowObject builtin only for shallow boilerplates
+  // without elements up to the number of properties that the stubs can handle.
   if ((p.flags() & ObjectLiteral::kShallowProperties) != 0 &&
-      p.length() <= FastCloneShallowObjectStub::kMaximumClonedProperties) {
+      p.length() <=
+          ConstructorBuiltinsAssembler::kMaximumClonedShallowObjectProperties) {
     Callable callable =
         CodeFactory::FastCloneShallowObject(isolate(), p.length());
     ReplaceWithStubCall(node, callable, flags);
@@ -554,9 +508,8 @@
   ReplaceWithRuntimeCall(node, Runtime::kNewScriptContext);
 }
 
-
-void JSGenericLowering::LowerJSCallConstruct(Node* node) {
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+void JSGenericLowering::LowerJSConstruct(Node* node) {
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   int const arg_count = static_cast<int>(p.arity() - 2);
   CallDescriptor::Flags flags = FrameStateFlagForCall(node);
   Callable callable = CodeFactory::Construct(isolate());
@@ -574,9 +527,44 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
+  ConstructWithSpreadParameters const& p =
+      ConstructWithSpreadParametersOf(node->op());
+  int const arg_count = static_cast<int>(p.arity() - 2);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ConstructWithSpread(isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+  Node* new_target = node->InputAt(arg_count + 1);
+  Node* receiver = jsgraph()->UndefinedConstant();
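+  // Reshape the node to the stub's calling convention: code object first,
+  // then target, new.target, argument count and an (undefined) receiver.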
+  node->RemoveInput(arg_count + 1);  // Drop new target.
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, new_target);
+  node->InsertInput(zone(), 3, stub_arity);
+  node->InsertInput(zone(), 4, receiver);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
 
-void JSGenericLowering::LowerJSCallFunction(Node* node) {
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
+  CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+  Callable callable = CodeFactory::CallForwardVarargs(isolate());
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  if (p.tail_call_mode() == TailCallMode::kAllow) {
+    flags |= CallDescriptor::kSupportsTailCalls;
+  }
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* start_index = jsgraph()->Uint32Constant(p.start_index());
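+  // The index of the first argument to forward is passed to the stub as an
+  // explicit parameter.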
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, start_index);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
+
+void JSGenericLowering::LowerJSCall(Node* node) {
+  CallParameters const& p = CallParametersOf(node->op());
   int const arg_count = static_cast<int>(p.arity() - 2);
   ConvertReceiverMode const mode = p.convert_mode();
   Callable callable = CodeFactory::Call(isolate(), mode);
@@ -593,6 +581,19 @@
   NodeProperties::ChangeOp(node, common()->Call(desc));
 }
 
+void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
+  CallWithSpreadParameters const& p = CallWithSpreadParametersOf(node->op());
+  int const arg_count = static_cast<int>(p.arity() - 2);
+  Callable callable = CodeFactory::CallWithSpread(isolate());
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      isolate(), zone(), callable.descriptor(), arg_count + 1, flags);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* stub_arity = jsgraph()->Int32Constant(arg_count);
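+  // Pass the argument count explicitly, analogous to LowerJSCall above.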
+  node->InsertInput(zone(), 0, stub_code);
+  node->InsertInput(zone(), 2, stub_arity);
+  NodeProperties::ChangeOp(node, common()->Call(desc));
+}
 
 void JSGenericLowering::LowerJSCallRuntime(Node* node) {
   const CallRuntimeParameters& p = CallRuntimeParametersOf(node->op());
@@ -604,33 +605,24 @@
 }
 
 void JSGenericLowering::LowerJSForInNext(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInNext);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ForInNext(isolate());
+  ReplaceWithStubCall(node, callable, flags);
 }
 
-
 void JSGenericLowering::LowerJSForInPrepare(Node* node) {
-  ReplaceWithRuntimeCall(node, Runtime::kForInPrepare);
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::ForInPrepare(isolate());
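+  // ForInPrepare produces three results, hence the explicit result size of 3
+  // for the stub call descriptor.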
+  ReplaceWithStubCall(node, callable, flags, node->op()->properties(), 3);
 }
 
 void JSGenericLowering::LowerJSLoadMessage(Node* node) {
-  ExternalReference message_address =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  node->RemoveInput(NodeProperties::FirstContextIndex(node));
-  node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
-  node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
-  NodeProperties::ChangeOp(node, machine()->Load(MachineType::AnyTagged()));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 
 void JSGenericLowering::LowerJSStoreMessage(Node* node) {
-  ExternalReference message_address =
-      ExternalReference::address_of_pending_message_obj(isolate());
-  node->RemoveInput(NodeProperties::FirstContextIndex(node));
-  node->InsertInput(zone(), 0, jsgraph()->ExternalConstant(message_address));
-  node->InsertInput(zone(), 1, jsgraph()->IntPtrConstant(0));
-  StoreRepresentation representation(MachineRepresentation::kTagged,
-                                     kNoWriteBarrier);
-  NodeProperties::ChangeOp(node, machine()->Store(representation));
+  UNREACHABLE();  // Eliminated in typed lowering.
 }
 
 void JSGenericLowering::LowerJSLoadModule(Node* node) {
@@ -695,6 +687,11 @@
   ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
 }
 
+void JSGenericLowering::LowerJSDebugger(Node* node) {
+  CallDescriptor::Flags flags = FrameStateFlagForCall(node);
+  Callable callable = CodeFactory::HandleDebuggerStatement(isolate());
+  ReplaceWithStubCall(node, callable, flags);
+}
 
 Zone* JSGenericLowering::zone() const { return graph()->zone(); }
 
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
index 38ee431..88d0b45 100644
--- a/src/compiler/js-generic-lowering.h
+++ b/src/compiler/js-generic-lowering.h
@@ -38,7 +38,8 @@
   // Helpers to replace existing nodes with a generic call.
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags,
-                           Operator::Properties properties);
+                           Operator::Properties properties,
+                           int result_size = 1);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
   Zone* zone() const;
diff --git a/src/compiler/js-global-object-specialization.cc b/src/compiler/js-global-object-specialization.cc
deleted file mode 100644
index e9ff060..0000000
--- a/src/compiler/js-global-object-specialization.cc
+++ /dev/null
@@ -1,287 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/js-global-object-specialization.h"
-
-#include "src/compilation-dependencies.h"
-#include "src/compiler/access-builder.h"
-#include "src/compiler/common-operator.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/js-operator.h"
-#include "src/compiler/node-properties.h"
-#include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-cache.h"
-#include "src/lookup.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-struct JSGlobalObjectSpecialization::ScriptContextTableLookupResult {
-  Handle<Context> context;
-  bool immutable;
-  int index;
-};
-
-JSGlobalObjectSpecialization::JSGlobalObjectSpecialization(
-    Editor* editor, JSGraph* jsgraph, Handle<JSGlobalObject> global_object,
-    CompilationDependencies* dependencies)
-    : AdvancedReducer(editor),
-      jsgraph_(jsgraph),
-      global_object_(global_object),
-      dependencies_(dependencies),
-      type_cache_(TypeCache::Get()) {}
-
-Reduction JSGlobalObjectSpecialization::Reduce(Node* node) {
-  switch (node->opcode()) {
-    case IrOpcode::kJSLoadGlobal:
-      return ReduceJSLoadGlobal(node);
-    case IrOpcode::kJSStoreGlobal:
-      return ReduceJSStoreGlobal(node);
-    default:
-      break;
-  }
-  return NoChange();
-}
-
-namespace {
-
-FieldAccess ForPropertyCellValue(MachineRepresentation representation,
-                                 Type* type, Handle<Name> name) {
-  WriteBarrierKind kind = kFullWriteBarrier;
-  if (representation == MachineRepresentation::kTaggedSigned) {
-    kind = kNoWriteBarrier;
-  } else if (representation == MachineRepresentation::kTaggedPointer) {
-    kind = kPointerWriteBarrier;
-  }
-  MachineType r = MachineType::TypeForRepresentation(representation);
-  FieldAccess access = {kTaggedBase, PropertyCell::kValueOffset, name, type, r,
-                        kind};
-  return access;
-}
-}  // namespace
-
-Reduction JSGlobalObjectSpecialization::ReduceJSLoadGlobal(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
-  Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Try to lookup the name on the script context table first (lexical scoping).
-  ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(name, &result)) {
-    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
-    Node* context = jsgraph()->HeapConstant(result.context);
-    Node* value = effect = graph()->NewNode(
-        javascript()->LoadContext(0, result.index, result.immutable), context,
-        context, effect);
-    ReplaceWithValue(node, value, effect);
-    return Replace(value);
-  }
-
-  // Lookup on the global object instead.  We only deal with own data
-  // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object(), name, LookupIterator::OWN);
-  if (it.state() != LookupIterator::DATA) return NoChange();
-  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
-  Handle<PropertyCell> property_cell = it.GetPropertyCell();
-  PropertyDetails property_details = property_cell->property_details();
-  Handle<Object> property_cell_value(property_cell->value(), isolate());
-
-  // Load from non-configurable, read-only data property on the global
-  // object can be constant-folded, even without deoptimization support.
-  if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
-    Node* value = jsgraph()->Constant(property_cell_value);
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  // Record a code dependency on the cell if we can benefit from the
-  // additional feedback, or the global property is configurable (i.e.
-  // can be deleted or reconfigured to an accessor property).
-  if (property_details.cell_type() != PropertyCellType::kMutable ||
-      property_details.IsConfigurable()) {
-    dependencies()->AssumePropertyCell(property_cell);
-  }
-
-  // Load from constant/undefined global property can be constant-folded.
-  if (property_details.cell_type() == PropertyCellType::kConstant ||
-      property_details.cell_type() == PropertyCellType::kUndefined) {
-    Node* value = jsgraph()->Constant(property_cell_value);
-    ReplaceWithValue(node, value);
-    return Replace(value);
-  }
-
-  // Load from constant type cell can benefit from type feedback.
-  Type* property_cell_value_type = Type::NonInternal();
-  MachineRepresentation representation = MachineRepresentation::kTagged;
-  if (property_details.cell_type() == PropertyCellType::kConstantType) {
-    // Compute proper type based on the current value in the cell.
-    if (property_cell_value->IsSmi()) {
-      property_cell_value_type = Type::SignedSmall();
-      representation = MachineRepresentation::kTaggedSigned;
-    } else if (property_cell_value->IsNumber()) {
-      property_cell_value_type = Type::Number();
-      representation = MachineRepresentation::kTaggedPointer;
-    } else {
-      // TODO(turbofan): Track the property_cell_value_map on the FieldAccess
-      // below and use it in LoadElimination to eliminate map checks.
-      Handle<Map> property_cell_value_map(
-          Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-      property_cell_value_type = Type::For(property_cell_value_map);
-      representation = MachineRepresentation::kTaggedPointer;
-    }
-  }
-  Node* value = effect =
-      graph()->NewNode(simplified()->LoadField(ForPropertyCellValue(
-                           representation, property_cell_value_type, name)),
-                       jsgraph()->HeapConstant(property_cell), effect, control);
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
-}
-
-
-Reduction JSGlobalObjectSpecialization::ReduceJSStoreGlobal(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
-  Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
-  Node* value = NodeProperties::GetValueInput(node, 0);
-  Node* effect = NodeProperties::GetEffectInput(node);
-  Node* control = NodeProperties::GetControlInput(node);
-
-  // Try to lookup the name on the script context table first (lexical scoping).
-  ScriptContextTableLookupResult result;
-  if (LookupInScriptContextTable(name, &result)) {
-    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
-    if (result.immutable) return NoChange();
-    Node* context = jsgraph()->HeapConstant(result.context);
-    effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
-                              context, value, context, effect, control);
-    ReplaceWithValue(node, value, effect, control);
-    return Replace(value);
-  }
-
-  // Lookup on the global object instead.  We only deal with own data
-  // properties of the global object here (represented as PropertyCell).
-  LookupIterator it(global_object(), name, LookupIterator::OWN);
-  if (it.state() != LookupIterator::DATA) return NoChange();
-  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
-  Handle<PropertyCell> property_cell = it.GetPropertyCell();
-  PropertyDetails property_details = property_cell->property_details();
-  Handle<Object> property_cell_value(property_cell->value(), isolate());
-
-  // Don't even bother trying to lower stores to read-only data properties.
-  if (property_details.IsReadOnly()) return NoChange();
-  switch (property_details.cell_type()) {
-    case PropertyCellType::kUndefined: {
-      return NoChange();
-    }
-    case PropertyCellType::kConstant: {
-      // Record a code dependency on the cell, and just deoptimize if the new
-      // value doesn't match the previous value stored inside the cell.
-      dependencies()->AssumePropertyCell(property_cell);
-      Node* check = graph()->NewNode(simplified()->ReferenceEqual(), value,
-                                     jsgraph()->Constant(property_cell_value));
-      effect =
-          graph()->NewNode(simplified()->CheckIf(), check, effect, control);
-      break;
-    }
-    case PropertyCellType::kConstantType: {
-      // Record a code dependency on the cell, and just deoptimize if the new
-      // values' type doesn't match the type of the previous value in the cell.
-      dependencies()->AssumePropertyCell(property_cell);
-      Type* property_cell_value_type;
-      MachineRepresentation representation = MachineRepresentation::kTagged;
-      if (property_cell_value->IsHeapObject()) {
-        // We cannot do anything if the {property_cell_value}s map is no
-        // longer stable.
-        Handle<Map> property_cell_value_map(
-            Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
-        if (!property_cell_value_map->is_stable()) return NoChange();
-        dependencies()->AssumeMapStable(property_cell_value_map);
-
-        // Check that the {value} is a HeapObject.
-        value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                          value, effect, control);
-
-        // Check {value} map agains the {property_cell} map.
-        effect = graph()->NewNode(
-            simplified()->CheckMaps(1), value,
-            jsgraph()->HeapConstant(property_cell_value_map), effect, control);
-        property_cell_value_type = Type::OtherInternal();
-        representation = MachineRepresentation::kTaggedPointer;
-      } else {
-        // Check that the {value} is a Smi.
-        value = effect =
-            graph()->NewNode(simplified()->CheckSmi(), value, effect, control);
-        property_cell_value_type = Type::SignedSmall();
-        representation = MachineRepresentation::kTaggedSigned;
-      }
-      effect = graph()->NewNode(
-          simplified()->StoreField(ForPropertyCellValue(
-              representation, property_cell_value_type, name)),
-          jsgraph()->HeapConstant(property_cell), value, effect, control);
-      break;
-    }
-    case PropertyCellType::kMutable: {
-      // Store to non-configurable, data property on the global can be lowered
-      // to a field store, even without recording a code dependency on the cell,
-      // because the property cannot be deleted or reconfigured to an accessor
-      // or interceptor property.
-      if (property_details.IsConfigurable()) {
-        // Protect lowering by recording a code dependency on the cell.
-        dependencies()->AssumePropertyCell(property_cell);
-      }
-      effect = graph()->NewNode(
-          simplified()->StoreField(ForPropertyCellValue(
-              MachineRepresentation::kTagged, Type::NonInternal(), name)),
-          jsgraph()->HeapConstant(property_cell), value, effect, control);
-      break;
-    }
-  }
-  ReplaceWithValue(node, value, effect, control);
-  return Replace(value);
-}
-
-bool JSGlobalObjectSpecialization::LookupInScriptContextTable(
-    Handle<Name> name, ScriptContextTableLookupResult* result) {
-  if (!name->IsString()) return false;
-  Handle<ScriptContextTable> script_context_table(
-      global_object()->native_context()->script_context_table(), isolate());
-  ScriptContextTable::LookupResult lookup_result;
-  if (!ScriptContextTable::Lookup(script_context_table,
-                                  Handle<String>::cast(name), &lookup_result)) {
-    return false;
-  }
-  Handle<Context> script_context = ScriptContextTable::GetContext(
-      script_context_table, lookup_result.context_index);
-  result->context = script_context;
-  result->immutable = lookup_result.mode == CONST;
-  result->index = lookup_result.slot_index;
-  return true;
-}
-
-Graph* JSGlobalObjectSpecialization::graph() const {
-  return jsgraph()->graph();
-}
-
-Isolate* JSGlobalObjectSpecialization::isolate() const {
-  return jsgraph()->isolate();
-}
-
-CommonOperatorBuilder* JSGlobalObjectSpecialization::common() const {
-  return jsgraph()->common();
-}
-
-JSOperatorBuilder* JSGlobalObjectSpecialization::javascript() const {
-  return jsgraph()->javascript();
-}
-
-SimplifiedOperatorBuilder* JSGlobalObjectSpecialization::simplified() const {
-  return jsgraph()->simplified();
-}
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/js-global-object-specialization.h b/src/compiler/js-global-object-specialization.h
deleted file mode 100644
index 50bdd80..0000000
--- a/src/compiler/js-global-object-specialization.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-#define V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
-
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class CompilationDependencies;
-
-namespace compiler {
-
-// Forward declarations.
-class CommonOperatorBuilder;
-class JSGraph;
-class JSOperatorBuilder;
-class SimplifiedOperatorBuilder;
-class TypeCache;
-
-// Specializes a given JSGraph to a given global object, potentially constant
-// folding some {JSLoadGlobal} nodes or strength reducing some {JSStoreGlobal}
-// nodes.
-class JSGlobalObjectSpecialization final : public AdvancedReducer {
- public:
-  JSGlobalObjectSpecialization(Editor* editor, JSGraph* jsgraph,
-                               Handle<JSGlobalObject> global_object,
-                               CompilationDependencies* dependencies);
-
-  Reduction Reduce(Node* node) final;
-
- private:
-  Reduction ReduceJSLoadGlobal(Node* node);
-  Reduction ReduceJSStoreGlobal(Node* node);
-
-  struct ScriptContextTableLookupResult;
-  bool LookupInScriptContextTable(Handle<Name> name,
-                                  ScriptContextTableLookupResult* result);
-
-  Graph* graph() const;
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Isolate* isolate() const;
-  CommonOperatorBuilder* common() const;
-  JSOperatorBuilder* javascript() const;
-  SimplifiedOperatorBuilder* simplified() const;
-  Handle<JSGlobalObject> global_object() const { return global_object_; }
-  CompilationDependencies* dependencies() const { return dependencies_; }
-
-  JSGraph* const jsgraph_;
-  Handle<JSGlobalObject> const global_object_;
-  CompilationDependencies* const dependencies_;
-  TypeCache const& type_cache_;
-
-  DISALLOW_COPY_AND_ASSIGN(JSGlobalObjectSpecialization);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_JS_GLOBAL_OBJECT_SPECIALIZATION_H_
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index 8626cd1..b51623a 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -2,10 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/code-stubs.h"
 #include "src/compiler/js-graph.h"
+
+#include "src/code-stubs.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/typer.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -31,11 +33,26 @@
 
 Node* JSGraph::CEntryStubConstant(int result_size, SaveFPRegsMode save_doubles,
                                   ArgvMode argv_mode, bool builtin_exit_frame) {
-  if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack &&
-      result_size == 1) {
+  if (save_doubles == kDontSaveFPRegs && argv_mode == kArgvOnStack) {
+    DCHECK(result_size >= 1 && result_size <= 3);
+    if (!builtin_exit_frame) {
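+      // Cache one constant per result size (1, 2 or 3), so stubs returning
+      // multiple values each get their own cached node.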
+      CachedNode key;
+      if (result_size == 1) {
+        key = kCEntryStub1Constant;
+      } else if (result_size == 2) {
+        key = kCEntryStub2Constant;
+      } else {
+        DCHECK(result_size == 3);
+        key = kCEntryStub3Constant;
+      }
+      return CACHED(
+          key, HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
+                                       argv_mode, builtin_exit_frame)
+                                .GetCode()));
+    }
     CachedNode key = builtin_exit_frame
-                         ? kCEntryStubWithBuiltinExitFrameConstant
-                         : kCEntryStubConstant;
+                         ? kCEntryStub1WithBuiltinExitFrameConstant
+                         : kCEntryStub1Constant;
     return CACHED(key,
                   HeapConstant(CEntryStub(isolate(), result_size, save_doubles,
                                           argv_mode, builtin_exit_frame)
@@ -51,11 +68,6 @@
                 HeapConstant(factory()->empty_fixed_array()));
 }
 
-Node* JSGraph::EmptyLiteralsArrayConstant() {
-  return CACHED(kEmptyLiteralsArrayConstant,
-                HeapConstant(factory()->empty_literals_array()));
-}
-
 Node* JSGraph::EmptyStringConstant() {
   return CACHED(kEmptyStringConstant, HeapConstant(factory()->empty_string()));
 }
@@ -264,7 +276,8 @@
 }
 
 Node* JSGraph::EmptyStateValues() {
-  return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(0)));
+  return CACHED(kEmptyStateValues, graph()->NewNode(common()->StateValues(
+                                       0, SparseInputMask::Dense())));
 }
 
 Node* JSGraph::Dead() {
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index c2c0c77..8f81555 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -49,7 +49,6 @@
                            ArgvMode argv_mode = kArgvOnStack,
                            bool builtin_exit_frame = false);
   Node* EmptyFixedArrayConstant();
-  Node* EmptyLiteralsArrayConstant();
   Node* EmptyStringConstant();
   Node* FixedArrayMapConstant();
   Node* FixedDoubleArrayMapConstant();
@@ -162,10 +161,11 @@
     kAllocateInNewSpaceStubConstant,
     kAllocateInOldSpaceStubConstant,
     kToNumberBuiltinConstant,
-    kCEntryStubConstant,
-    kCEntryStubWithBuiltinExitFrameConstant,
+    kCEntryStub1Constant,
+    kCEntryStub2Constant,
+    kCEntryStub3Constant,
+    kCEntryStub1WithBuiltinExitFrameConstant,
     kEmptyFixedArrayConstant,
-    kEmptyLiteralsArrayConstant,
     kEmptyStringConstant,
     kFixedArrayMapConstant,
     kFixedDoubleArrayMapConstant,
diff --git a/src/compiler/js-inlining-heuristic.cc b/src/compiler/js-inlining-heuristic.cc
index d6229c2..6f99fbb 100644
--- a/src/compiler/js-inlining-heuristic.cc
+++ b/src/compiler/js-inlining-heuristic.cc
@@ -22,7 +22,7 @@
 namespace {
 
 int CollectFunctions(Node* node, Handle<JSFunction>* functions,
-                     int functions_size) {
+                     int functions_size, Handle<SharedFunctionInfo>& shared) {
   DCHECK_NE(0, functions_size);
   HeapObjectMatcher m(node);
   if (m.HasValue() && m.Value()->IsJSFunction()) {
@@ -39,23 +39,29 @@
     }
     return value_input_count;
   }
+  if (m.IsJSCreateClosure()) {
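+    // The target is a closure instantiation; record only its shared info and
+    // mark the function slot with a null handle (see Candidate::shared_info).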
+    CreateClosureParameters const& p = CreateClosureParametersOf(m.op());
+    functions[0] = Handle<JSFunction>::null();
+    shared = p.shared_info();
+    return 1;
+  }
   return 0;
 }
 
-bool CanInlineFunction(Handle<JSFunction> function) {
+bool CanInlineFunction(Handle<SharedFunctionInfo> shared) {
   // Built-in functions are handled by the JSBuiltinReducer.
-  if (function->shared()->HasBuiltinFunctionId()) return false;
+  if (shared->HasBuiltinFunctionId()) return false;
 
-  // Don't inline builtins.
-  if (function->shared()->IsBuiltin()) return false;
+  // Only choose user code for inlining.
+  if (!shared->IsUserJavaScript()) return false;
 
   // Quick check on the size of the AST to avoid parsing large candidate.
-  if (function->shared()->ast_node_count() > FLAG_max_inlined_nodes) {
+  if (shared->ast_node_count() > FLAG_max_inlined_nodes) {
     return false;
   }
 
   // Avoid inlining across the boundary of asm.js code.
-  if (function->shared()->asm_function()) return false;
+  if (shared->asm_function()) return false;
   return true;
 }
 
@@ -72,8 +78,8 @@
   Node* callee = node->InputAt(0);
   Candidate candidate;
   candidate.node = node;
-  candidate.num_functions =
-      CollectFunctions(callee, candidate.functions, kMaxCallPolymorphism);
+  candidate.num_functions = CollectFunctions(
+      callee, candidate.functions, kMaxCallPolymorphism, candidate.shared_info);
   if (candidate.num_functions == 0) {
     return NoChange();
   } else if (candidate.num_functions > 1 && !FLAG_polymorphic_inlining) {
@@ -87,11 +93,14 @@
   // Functions marked with %SetForceInlineFlag are immediately inlined.
   bool can_inline = false, force_inline = true;
   for (int i = 0; i < candidate.num_functions; ++i) {
-    Handle<JSFunction> function = candidate.functions[i];
-    if (!function->shared()->force_inline()) {
+    Handle<SharedFunctionInfo> shared =
+        candidate.functions[i].is_null()
+            ? candidate.shared_info
+            : handle(candidate.functions[i]->shared());
+    if (!shared->force_inline()) {
       force_inline = false;
     }
-    if (CanInlineFunction(function)) {
+    if (CanInlineFunction(shared)) {
       can_inline = true;
     }
   }
@@ -117,11 +126,11 @@
   }
 
   // Gather feedback on how often this call site has been hit before.
-  if (node->opcode() == IrOpcode::kJSCallFunction) {
-    CallFunctionParameters const p = CallFunctionParametersOf(node->op());
+  if (node->opcode() == IrOpcode::kJSCall) {
+    CallParameters const p = CallParametersOf(node->op());
     candidate.frequency = p.frequency();
   } else {
-    CallConstructParameters const p = CallConstructParametersOf(node->op());
+    ConstructParameters const p = ConstructParametersOf(node->op());
     candidate.frequency = p.frequency();
   }
 
@@ -167,15 +176,18 @@
   int const num_calls = candidate.num_functions;
   Node* const node = candidate.node;
   if (num_calls == 1) {
-    Handle<JSFunction> function = candidate.functions[0];
-    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    Handle<SharedFunctionInfo> shared =
+        candidate.functions[0].is_null()
+            ? candidate.shared_info
+            : handle(candidate.functions[0]->shared());
+    Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
-      cumulative_count_ += function->shared()->ast_node_count();
+      cumulative_count_ += shared->ast_node_count();
     }
     return reduction;
   }
 
-  // Expand the JSCallFunction/JSCallConstruct node to a subgraph first if
+  // Expand the JSCall/JSConstruct node to a subgraph first if
   // we have multiple known target functions.
   DCHECK_LT(1, num_calls);
   Node* calls[kMaxCallPolymorphism + 1];
@@ -192,6 +204,8 @@
 
   // Create the appropriate control flow to dispatch to the cloned calls.
   for (int i = 0; i < num_calls; ++i) {
+    // TODO(2206): Base the comparison on the underlying SharedFunctionInfo
+    // instead of the target JSFunction reference directly.
     Node* target = jsgraph()->HeapConstant(candidate.functions[i]);
     if (i != (num_calls - 1)) {
       Node* check =
@@ -255,7 +269,7 @@
   for (int i = 0; i < num_calls; ++i) {
     Handle<JSFunction> function = candidate.functions[i];
     Node* node = calls[i];
-    Reduction const reduction = inliner_.ReduceJSCall(node, function);
+    Reduction const reduction = inliner_.ReduceJSCall(node);
     if (reduction.Changed()) {
       cumulative_count_ += function->shared()->ast_node_count();
     }
@@ -281,9 +295,12 @@
     PrintF("  #%d:%s, frequency:%g\n", candidate.node->id(),
            candidate.node->op()->mnemonic(), candidate.frequency);
     for (int i = 0; i < candidate.num_functions; ++i) {
-      Handle<JSFunction> function = candidate.functions[i];
-      PrintF("  - size:%d, name: %s\n", function->shared()->ast_node_count(),
-             function->shared()->DebugName()->ToCString().get());
+      Handle<SharedFunctionInfo> shared =
+          candidate.functions[i].is_null()
+              ? candidate.shared_info
+              : handle(candidate.functions[i]->shared());
+      PrintF("  - size:%d, name: %s\n", shared->ast_node_count(),
+             shared->DebugName()->ToCString().get());
     }
   }
 }
diff --git a/src/compiler/js-inlining-heuristic.h b/src/compiler/js-inlining-heuristic.h
index aca8011..b834cb0 100644
--- a/src/compiler/js-inlining-heuristic.h
+++ b/src/compiler/js-inlining-heuristic.h
@@ -37,6 +37,11 @@
 
   struct Candidate {
     Handle<JSFunction> functions[kMaxCallPolymorphism];
+    // TODO(2206): For now, polymorphic inlining is treated orthogonally to
+    // inlining based on SharedFunctionInfo. This should be unified and the
+    // above array should be switched to SharedFunctionInfo instead. Currently
+    // we use {num_functions == 1 && functions[0].is_null()} as an indicator.
+    Handle<SharedFunctionInfo> shared_info;
     int num_functions;
     Node* node = nullptr;    // The call site at which to inline.
     float frequency = 0.0f;  // Relative frequency of this call site.
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
index 0e122a6..c87be6c 100644
--- a/src/compiler/js-inlining.cc
+++ b/src/compiler/js-inlining.cc
@@ -4,25 +4,21 @@
 
 #include "src/compiler/js-inlining.h"
 
-#include "src/ast/ast-numbering.h"
 #include "src/ast/ast.h"
 #include "src/compilation-info.h"
 #include "src/compiler.h"
 #include "src/compiler/all-nodes.h"
-#include "src/compiler/ast-graph-builder.h"
-#include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/bytecode-graph-builder.h"
 #include "src/compiler/common-operator.h"
+#include "src/compiler/compiler-source-position-table.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/isolate-inl.h"
 #include "src/parsing/parse-info.h"
-#include "src/parsing/rewriter.h"
 
 namespace v8 {
 namespace internal {
@@ -35,45 +31,45 @@
 
 
 // Provides convenience accessors for the common layout of nodes having either
-// the {JSCallFunction} or the {JSCallConstruct} operator.
+// the {JSCall} or the {JSConstruct} operator.
 class JSCallAccessor {
  public:
   explicit JSCallAccessor(Node* call) : call_(call) {
-    DCHECK(call->opcode() == IrOpcode::kJSCallFunction ||
-           call->opcode() == IrOpcode::kJSCallConstruct);
+    DCHECK(call->opcode() == IrOpcode::kJSCall ||
+           call->opcode() == IrOpcode::kJSConstruct);
   }
 
   Node* target() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have same layout here.
+    // Both {JSCall} and {JSConstruct} have the same layout here.
     return call_->InputAt(0);
   }
 
   Node* receiver() {
-    DCHECK_EQ(IrOpcode::kJSCallFunction, call_->opcode());
+    DCHECK_EQ(IrOpcode::kJSCall, call_->opcode());
     return call_->InputAt(1);
   }
 
   Node* new_target() {
-    DCHECK_EQ(IrOpcode::kJSCallConstruct, call_->opcode());
+    DCHECK_EQ(IrOpcode::kJSConstruct, call_->opcode());
     return call_->InputAt(formal_arguments() + 1);
   }
 
   Node* frame_state() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have frame state.
+    // Both {JSCall} and {JSConstruct} have a frame state.
     return NodeProperties::GetFrameStateInput(call_);
   }
 
   int formal_arguments() {
-    // Both, {JSCallFunction} and {JSCallConstruct}, have two extra inputs:
-    //  - JSCallConstruct: Includes target function and new target.
-    //  - JSCallFunction: Includes target function and receiver.
+    // Both {JSCall} and {JSConstruct} have two extra inputs:
+    //  - JSConstruct: Includes target function and new target.
+    //  - JSCall: Includes target function and receiver.
     return call_->op()->ValueInputCount() - 2;
   }
 
   float frequency() const {
-    return (call_->opcode() == IrOpcode::kJSCallFunction)
-               ? CallFunctionParametersOf(call_->op()).frequency()
-               : CallConstructParametersOf(call_->op()).frequency();
+    return (call_->opcode() == IrOpcode::kJSCall)
+               ? CallParametersOf(call_->op()).frequency()
+               : ConstructParametersOf(call_->op()).frequency();
   }
 
  private:
@@ -224,9 +220,9 @@
   }
 }
 
-
 Node* JSInliner::CreateArtificialFrameState(Node* node, Node* outer_frame_state,
                                             int parameter_count,
+                                            BailoutId bailout_id,
                                             FrameStateType frame_state_type,
                                             Handle<SharedFunctionInfo> shared) {
   const FrameStateFunctionInfo* state_info =
@@ -234,15 +230,15 @@
                                              parameter_count + 1, 0, shared);
 
   const Operator* op = common()->FrameState(
-      BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = common()->StateValues(0);
+      bailout_id, OutputFrameStateCombine::Ignore(), state_info);
+  const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
   NodeVector params(local_zone_);
   for (int parameter = 0; parameter < parameter_count + 1; ++parameter) {
     params.push_back(node->InputAt(1 + parameter));
   }
-  const Operator* op_param =
-      common()->StateValues(static_cast<int>(params.size()));
+  const Operator* op_param = common()->StateValues(
+      static_cast<int>(params.size()), SparseInputMask::Dense());
   Node* params_node = graph()->NewNode(
       op_param, static_cast<int>(params.size()), &params.front());
   return graph()->NewNode(op, params_node, node0, node0,
@@ -273,7 +269,7 @@
 
   const Operator* op = common()->FrameState(
       BailoutId(-1), OutputFrameStateCombine::Ignore(), state_info);
-  const Operator* op0 = common()->StateValues(0);
+  const Operator* op0 = common()->StateValues(0, SparseInputMask::Dense());
   Node* node0 = graph()->NewNode(op0);
   return graph()->NewNode(op, node0, node0, node0,
                           jsgraph()->UndefinedConstant(), function,
@@ -282,19 +278,6 @@
 
 namespace {
 
-// TODO(turbofan): Shall we move this to the NodeProperties? Or some (untyped)
-// alias analyzer?
-bool IsSame(Node* a, Node* b) {
-  if (a == b) {
-    return true;
-  } else if (a->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a->InputAt(0), b);
-  } else if (b->opcode() == IrOpcode::kCheckHeapObject) {
-    return IsSame(a, b->InputAt(0));
-  }
-  return false;
-}
-
 // TODO(bmeurer): Unify this with the witness helper functions in the
 // js-builtin-reducer.cc once we have a better understanding of the
 // map tracking we want to do, and eventually changed the CheckMaps
@@ -307,41 +290,39 @@
 // function, which either returns the map set from the CheckMaps or
 // a singleton set from a StoreField.
 bool NeedsConvertReceiver(Node* receiver, Node* effect) {
-  for (Node* dominator = effect;;) {
-    if (dominator->opcode() == IrOpcode::kCheckMaps &&
-        IsSame(dominator->InputAt(0), receiver)) {
-      // Check if all maps have the given {instance_type}.
-      for (int i = 1; i < dominator->op()->ValueInputCount(); ++i) {
-        HeapObjectMatcher m(NodeProperties::GetValueInput(dominator, i));
-        if (!m.HasValue()) return true;
-        Handle<Map> const map = Handle<Map>::cast(m.Value());
-        if (!map->IsJSReceiverMap()) return true;
-      }
+  // Check if the {receiver} is already a JSReceiver.
+  switch (receiver->opcode()) {
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
+    case IrOpcode::kJSCreate:
+    case IrOpcode::kJSCreateArguments:
+    case IrOpcode::kJSCreateArray:
+    case IrOpcode::kJSCreateClosure:
+    case IrOpcode::kJSCreateIterResultObject:
+    case IrOpcode::kJSCreateKeyValueArray:
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+    case IrOpcode::kJSCreateLiteralRegExp:
+    case IrOpcode::kJSConvertReceiver:
+    case IrOpcode::kJSGetSuperConstructor:
+    case IrOpcode::kJSToObject: {
       return false;
     }
-    switch (dominator->opcode()) {
-      case IrOpcode::kStoreField: {
-        FieldAccess const& access = FieldAccessOf(dominator->op());
-        if (access.base_is_tagged == kTaggedBase &&
-            access.offset == HeapObject::kMapOffset) {
-          return true;
+    default: {
+      // We don't really care about the exact maps here, just the instance
+      // types, which don't change across potential side-effecting operations.
+      ZoneHandleSet<Map> maps;
+      NodeProperties::InferReceiverMapsResult result =
+          NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+      if (result != NodeProperties::kNoReceiverMaps) {
+        // Check if all {maps} are actually JSReceiver maps.
+        for (size_t i = 0; i < maps.size(); ++i) {
+          if (!maps[i]->IsJSReceiverMap()) return true;
         }
-        break;
+        return false;
       }
-      case IrOpcode::kStoreElement:
-      case IrOpcode::kStoreTypedElement:
-        break;
-      default: {
-        DCHECK_EQ(1, dominator->op()->EffectOutputCount());
-        if (dominator->op()->EffectInputCount() != 1 ||
-            !dominator->op()->HasProperty(Operator::kNoWrite)) {
-          // Didn't find any appropriate CheckMaps node.
-          return true;
-        }
-        break;
-      }
+      return true;
     }
-    dominator = NodeProperties::GetEffectInput(dominator);
   }
 }
 
@@ -365,25 +346,124 @@
 
 }  // namespace
 
-
-Reduction JSInliner::Reduce(Node* node) {
-  if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+// Determines whether the call target of the given call {node} is statically
+// known and can be used as an inlining candidate. The {SharedFunctionInfo} of
+// the call target is provided (the exact closure might be unknown).
+bool JSInliner::DetermineCallTarget(
+    Node* node, Handle<SharedFunctionInfo>& shared_info_out) {
+  DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  HeapObjectMatcher match(node->InputAt(0));
 
   // This reducer can handle both normal function calls as well a constructor
   // calls whenever the target is a constant function object, as follows:
-  //  - JSCallFunction(target:constant, receiver, args...)
-  //  - JSCallConstruct(target:constant, args..., new.target)
-  HeapObjectMatcher match(node->InputAt(0));
-  if (!match.HasValue() || !match.Value()->IsJSFunction()) return NoChange();
-  Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+  //  - JSCall(target:constant, receiver, args...)
+  //  - JSConstruct(target:constant, args..., new.target)
+  if (match.HasValue() && match.Value()->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
 
-  return ReduceJSCall(node, function);
+    // Disallow cross native-context inlining for now. This means that all parts
+    // of the resulting code will operate on the same global object. This also
+    // prevents cross context leaks, where we could inline functions from a
+    // different context and hold on to that context (and closure) from the code
+    // object.
+    // TODO(turbofan): We might want to revisit this restriction later when we
+    // have a need for this, and we know how to model different native contexts
+    // in the same graph in a compositional way.
+    if (function->context()->native_context() !=
+        info_->context()->native_context()) {
+      return false;
+    }
+
+    shared_info_out = handle(function->shared());
+    return true;
+  }
+
+  // This reducer can also handle calls where the target is statically known to
+  // be the result of a closure instantiation operation, as follows:
+  //  - JSCall(JSCreateClosure[shared](context), receiver, args...)
+  //  - JSConstruct(JSCreateClosure[shared](context), args..., new.target)
+  if (match.IsJSCreateClosure()) {
+    CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+    // Disallow inlining in case the instantiation site was never run and hence
+    // the vector cell does not contain a valid feedback vector for the call
+    // target.
+    // TODO(turbofan): We might consider eagerly creating the feedback vector
+    // in such a case (in {DetermineCallContext} below) eventually.
+    FeedbackSlot slot = p.feedback().slot();
+    Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+    if (!cell->value()->IsFeedbackVector()) return false;
+
+    shared_info_out = p.shared_info();
+    return true;
+  }
+
+  return false;
 }
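
// A rough stand-alone model (illustrative types, not V8's API) of the two
// call-target shapes DetermineCallTarget accepts: a constant JSFunction from
// the same native context, or a JSCreateClosure whose instantiation site has
// already produced a valid feedback vector.
#include <memory>

struct SharedFunctionInfoSketch {};

struct CallTargetSketch {
  std::shared_ptr<SharedFunctionInfoSketch> constant_function_shared;
  bool same_native_context = false;
  std::shared_ptr<SharedFunctionInfoSketch> created_closure_shared;
  bool feedback_vector_populated = false;
};

std::shared_ptr<SharedFunctionInfoSketch> DetermineTarget(
    const CallTargetSketch& t) {
  // Shape 1: JSCall(target:constant, ...); cross native-context inlining
  // remains disallowed.
  if (t.constant_function_shared && t.same_native_context)
    return t.constant_function_shared;
  // Shape 2: JSCall(JSCreateClosure[shared](context), ...); usable only once
  // the site has run and its vector cell holds a feedback vector.
  if (t.created_closure_shared && t.feedback_vector_populated)
    return t.created_closure_shared;
  return nullptr;  // Not statically known; no inlining.
}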
 
-Reduction JSInliner::ReduceJSCall(Node* node, Handle<JSFunction> function) {
+// Determines statically known information about the call target (assuming that
+// the call target is known according to {DetermineCallTarget} above). The
+// following static information is provided:
+//  - context         : The context (as SSA value) bound by the call target.
+//  - feedback_vector : The target is guaranteed to use this feedback vector.
+void JSInliner::DetermineCallContext(
+    Node* node, Node*& context_out,
+    Handle<FeedbackVector>& feedback_vector_out) {
   DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  HeapObjectMatcher match(node->InputAt(0));
+
+  if (match.HasValue() && match.Value()->IsJSFunction()) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(match.Value());
+
+    // If the target function was never invoked, its literals array might not
+    // contain a feedback vector. We ensure at this point that it is created.
+    JSFunction::EnsureLiterals(function);
+
+    // The inlinee specializes to the context from the JSFunction object.
+    context_out = jsgraph()->Constant(handle(function->context()));
+    feedback_vector_out = handle(function->feedback_vector());
+    return;
+  }
+
+  if (match.IsJSCreateClosure()) {
+    CreateClosureParameters const& p = CreateClosureParametersOf(match.op());
+
+    // Load the feedback vector of the target by looking up its vector cell at
+    // the instantiation site (we only decide to inline if it's populated).
+    FeedbackSlot slot = p.feedback().slot();
+    Handle<Cell> cell(Cell::cast(p.feedback().vector()->Get(slot)));
+    DCHECK(cell->value()->IsFeedbackVector());
+
+    // The inlinee uses the locally provided context at instantiation.
+    context_out = NodeProperties::GetContextInput(match.node());
+    feedback_vector_out = handle(FeedbackVector::cast(cell->value()));
+    return;
+  }
+
+  // Must succeed.
+  UNREACHABLE();
+}
+
+Reduction JSInliner::Reduce(Node* node) {
+  if (!IrOpcode::IsInlineeOpcode(node->opcode())) return NoChange();
+  return ReduceJSCall(node);
+}
+
+Reduction JSInliner::ReduceJSCall(Node* node) {
+  DCHECK(IrOpcode::IsInlineeOpcode(node->opcode()));
+  Handle<SharedFunctionInfo> shared_info;
   JSCallAccessor call(node);
-  Handle<SharedFunctionInfo> shared_info(function->shared());
+
+  // Determine the call target.
+  if (!DetermineCallTarget(node, shared_info)) return NoChange();
+
+  // Inlining is only supported in the bytecode pipeline.
+  if (!info_->is_optimizing_from_bytecode()) {
+    TRACE("Not inlining %s into %s due to use of the deprecated pipeline\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get());
+    return NoChange();
+  }
 
   // Function must be inlineable.
   if (!shared_info->IsInlineable()) {
@@ -394,7 +474,7 @@
   }
 
   // Constructor must be constructable.
-  if (node->opcode() == IrOpcode::kJSCallConstruct &&
+  if (node->opcode() == IrOpcode::kJSConstruct &&
       IsNonConstructible(shared_info)) {
     TRACE("Not inlining %s into %s because constructor is not constructable.\n",
           shared_info->DebugName()->ToCString().get(),
@@ -402,9 +482,21 @@
     return NoChange();
   }
 
+  // TODO(706642): Don't inline derived class constructors for now, as the
+  // inlining logic doesn't deal properly with derived class constructors
+  // that return a primitive, i.e. it's not in sync with what the Parser
+  // and the JSConstructStub do.
+  if (node->opcode() == IrOpcode::kJSConstruct &&
+      IsDerivedConstructor(shared_info->kind())) {
+    TRACE("Not inlining %s into %s because constructor is derived.\n",
+          shared_info->DebugName()->ToCString().get(),
+          info_->shared_info()->DebugName()->ToCString().get());
+    return NoChange();
+  }
+
   // Class constructors are callable, but [[Call]] will raise an exception.
   // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList ).
-  if (node->opcode() == IrOpcode::kJSCallFunction &&
+  if (node->opcode() == IrOpcode::kJSCall &&
       IsClassConstructor(shared_info->kind())) {
     TRACE("Not inlining %s into %s because callee is a class constructor.\n",
           shared_info->DebugName()->ToCString().get(),
@@ -420,22 +512,6 @@
     return NoChange();
   }
 
-  // Disallow cross native-context inlining for now. This means that all parts
-  // of the resulting code will operate on the same global object.
-  // This also prevents cross context leaks for asm.js code, where we could
-  // inline functions from a different context and hold on to that context (and
-  // closure) from the code object.
-  // TODO(turbofan): We might want to revisit this restriction later when we
-  // have a need for this, and we know how to model different native contexts
-  // in the same graph in a compositional way.
-  if (function->context()->native_context() !=
-      info_->context()->native_context()) {
-    TRACE("Not inlining %s into %s because of different native contexts\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   // TODO(turbofan): TranslatedState::GetAdaptedArguments() currently relies on
   // not inlining recursive functions. We might want to relax that at some
   // point.
@@ -484,14 +560,13 @@
     }
   }
 
-  Zone zone(info_->isolate()->allocator(), ZONE_NAME);
-  ParseInfo parse_info(&zone, shared_info);
-  CompilationInfo info(&parse_info, function);
+  ParseInfo parse_info(shared_info);
+  CompilationInfo info(parse_info.zone(), &parse_info,
+                       Handle<JSFunction>::null());
   if (info_->is_deoptimization_enabled()) info.MarkAsDeoptimizationEnabled();
-  if (info_->is_type_feedback_enabled()) info.MarkAsTypeFeedbackEnabled();
-  if (info_->is_optimizing_from_bytecode()) info.MarkAsOptimizeFromBytecode();
+  info.MarkAsOptimizeFromBytecode();
 
-  if (info.is_optimizing_from_bytecode() && !Compiler::EnsureBytecode(&info)) {
+  if (!Compiler::EnsureBytecode(&info)) {
     TRACE("Not inlining %s into %s because bytecode generation failed\n",
           shared_info->DebugName()->ToCString().get(),
           info_->shared_info()->DebugName()->ToCString().get());
@@ -501,25 +576,6 @@
     return NoChange();
   }
 
-  if (!info.is_optimizing_from_bytecode() &&
-      !Compiler::ParseAndAnalyze(info.parse_info())) {
-    TRACE("Not inlining %s into %s because parsing failed\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    if (info_->isolate()->has_pending_exception()) {
-      info_->isolate()->clear_pending_exception();
-    }
-    return NoChange();
-  }
-
-  if (!info.is_optimizing_from_bytecode() &&
-      !Compiler::EnsureDeoptimizationSupport(&info)) {
-    TRACE("Not inlining %s into %s because deoptimization support failed\n",
-          shared_info->DebugName()->ToCString().get(),
-          info_->shared_info()->DebugName()->ToCString().get());
-    return NoChange();
-  }
-
   // Remember that we inlined this function. This needs to be called right
   // after we ensure deoptimization support so that the code flusher
   // does not remove the code with the deoptimization support.
@@ -534,39 +590,20 @@
         shared_info->DebugName()->ToCString().get(),
         info_->shared_info()->DebugName()->ToCString().get());
 
-  // If function was lazily compiled, its literals array may not yet be set up.
-  JSFunction::EnsureLiterals(function);
+  // Determine the target's feedback vector and its context.
+  Node* context;
+  Handle<FeedbackVector> feedback_vector;
+  DetermineCallContext(node, context, feedback_vector);
 
   // Create the subgraph for the inlinee.
   Node* start;
   Node* end;
-  if (info.is_optimizing_from_bytecode()) {
+  {
     // Run the BytecodeGraphBuilder to create the subgraph.
     Graph::SubgraphScope scope(graph());
-    BytecodeGraphBuilder graph_builder(&zone, &info, jsgraph(),
-                                       call.frequency(), source_positions_,
-                                       inlining_id);
-    graph_builder.CreateGraph(false);
-
-    // Extract the inlinee start/end nodes.
-    start = graph()->start();
-    end = graph()->end();
-  } else {
-    // Run the loop assignment analyzer on the inlinee.
-    AstLoopAssignmentAnalyzer loop_assignment_analyzer(&zone, &info);
-    LoopAssignmentAnalysis* loop_assignment =
-        loop_assignment_analyzer.Analyze();
-
-    // Run the type hint analyzer on the inlinee.
-    TypeHintAnalyzer type_hint_analyzer(&zone);
-    TypeHintAnalysis* type_hint_analysis =
-        type_hint_analyzer.Analyze(handle(shared_info->code(), info.isolate()));
-
-    // Run the AstGraphBuilder to create the subgraph.
-    Graph::SubgraphScope scope(graph());
-    AstGraphBuilderWithPositions graph_builder(
-        &zone, &info, jsgraph(), call.frequency(), loop_assignment,
-        type_hint_analysis, source_positions_, inlining_id);
+    BytecodeGraphBuilder graph_builder(
+        parse_info.zone(), shared_info, feedback_vector, BailoutId::None(),
+        jsgraph(), call.frequency(), source_positions_, inlining_id);
     graph_builder.CreateGraph(false);
 
     // Extract the inlinee start/end nodes.
@@ -600,20 +637,38 @@
   Node* frame_state = call.frame_state();
   Node* new_target = jsgraph()->UndefinedConstant();
 
-  // Inline {JSCallConstruct} requires some additional magic.
-  if (node->opcode() == IrOpcode::kJSCallConstruct) {
+  // Inline {JSConstruct} requires some additional magic.
+  if (node->opcode() == IrOpcode::kJSConstruct) {
+    // Swizzle the inputs of the {JSConstruct} node to look like inputs to a
+    // normal {JSCall} node so that the rest of the inlining machinery
+    // behaves as if we were dealing with a regular function invocation.
+    new_target = call.new_target();  // Retrieve new target value input.
+    node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
+    node->InsertInput(graph()->zone(), 1, new_target);
+
     // Insert nodes around the call that model the behavior required for a
     // constructor dispatch (allocate implicit receiver and check return value).
     // This models the behavior usually accomplished by our {JSConstructStub}.
     // Note that the context has to be the caller's context (input to call node).
+    // Also note that by splitting off the {JSCreate} piece of the constructor
+    // call, we create an observable deoptimization point after the receiver
+    // instantiation but before the invocation (i.e. inside {JSConstructStub}
+    // where execution continues at {construct_stub_create_deopt_pc_offset}).
     Node* receiver = jsgraph()->TheHoleConstant();  // Implicit receiver.
     if (NeedsImplicitReceiver(shared_info)) {
-      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
       Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
       Node* context = NodeProperties::GetContextInput(node);
-      Node* create = graph()->NewNode(javascript()->Create(), call.target(),
-                                      call.new_target(), context,
-                                      frame_state_before, effect);
+      Node* frame_state_inside = CreateArtificialFrameState(
+          node, frame_state, call.formal_arguments(),
+          BailoutId::ConstructStubCreate(), FrameStateType::kConstructStub,
+          info.shared_info());
+      Node* create =
+          graph()->NewNode(javascript()->Create(), call.target(), new_target,
+                           context, frame_state_inside, effect, control);
+      Node* success = graph()->NewNode(common()->IfSuccess(), create);
+      uncaught_subcalls.push_back(create);  // Adds {IfException}.
+      NodeProperties::ReplaceControlInput(node, success);
       NodeProperties::ReplaceEffectInput(node, create);
       // Insert a check of the return value to determine whether the return
       // value or the implicit receiver should be selected as a result of the
@@ -628,42 +683,26 @@
       NodeProperties::ReplaceValueInput(check, node, 0);   // Fix-up input.
       receiver = create;  // The implicit receiver.
     }
-
-    // Swizzle the inputs of the {JSCallConstruct} node to look like inputs to a
-    // normal {JSCallFunction} node so that the rest of the inlining machinery
-    // behaves as if we were dealing with a regular function invocation.
-    new_target = call.new_target();  // Retrieve new target value input.
-    node->RemoveInput(call.formal_arguments() + 1);  // Drop new target.
-    node->InsertInput(graph()->zone(), 1, receiver);
+    node->ReplaceInput(1, receiver);
 
     // Insert a construct stub frame into the chain of frame states. This will
     // reconstruct the proper frame when deoptimizing within the constructor.
     frame_state = CreateArtificialFrameState(
         node, frame_state, call.formal_arguments(),
-        FrameStateType::kConstructStub, info.shared_info());
+        BailoutId::ConstructStubInvoke(), FrameStateType::kConstructStub,
+        info.shared_info());
   }
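
// The return-value check wired in above implements the usual `new` result
// rule; a self-contained model of that rule under stand-in types (not V8
// code):
#include <variant>

struct JSReceiverSketch {};  // Any object-like value.
struct PrimitiveSketch {};   // Numbers, strings, undefined, ...
using ValueSketch = std::variant<JSReceiverSketch, PrimitiveSketch>;

ValueSketch SelectConstructResult(const ValueSketch& return_value,
                                  const JSReceiverSketch& implicit_receiver) {
  // An object returned from the constructor wins; a primitive return is
  // dropped in favor of the implicit receiver allocated before the call.
  if (std::holds_alternative<JSReceiverSketch>(return_value))
    return return_value;
  return ValueSketch{implicit_receiver};
}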
 
-  // The inlinee specializes to the context from the JSFunction object.
-  // TODO(turbofan): We might want to load the context from the JSFunction at
-  // runtime in case we only know the SharedFunctionInfo once we have dynamic
-  // type feedback in the compiler.
-  Node* context = jsgraph()->Constant(handle(function->context()));
-
   // Insert a JSConvertReceiver node for sloppy callees. Note that the context
-  // passed into this node has to be the callees context (loaded above). Note
-  // that the frame state passed to the JSConvertReceiver must be the frame
-  // state _before_ the call; it is not necessary to fiddle with the receiver
-  // in that frame state tho, as the conversion of the receiver can be repeated
-  // any number of times, it's not observable.
-  if (node->opcode() == IrOpcode::kJSCallFunction &&
+  // passed into this node has to be the callee's context (loaded above).
+  if (node->opcode() == IrOpcode::kJSCall &&
       is_sloppy(shared_info->language_mode()) && !shared_info->native()) {
     Node* effect = NodeProperties::GetEffectInput(node);
     if (NeedsConvertReceiver(call.receiver(), effect)) {
-      const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
-      Node* frame_state_before = NodeProperties::FindFrameStateBefore(node);
-      Node* convert = effect = graph()->NewNode(
-          javascript()->ConvertReceiver(p.convert_mode()), call.receiver(),
-          context, frame_state_before, effect, start);
+      const CallParameters& p = CallParametersOf(node->op());
+      Node* convert = effect =
+          graph()->NewNode(javascript()->ConvertReceiver(p.convert_mode()),
+                           call.receiver(), context, effect, start);
       NodeProperties::ReplaceValueInput(node, convert, 1);
       NodeProperties::ReplaceEffectInput(node, effect);
     }
@@ -676,8 +715,8 @@
   // the case when the outermost function inlines a tail call (it should remove
   // potential arguments adaptor frame that belongs to outermost function when
   // deopt happens).
-  if (node->opcode() == IrOpcode::kJSCallFunction) {
-    const CallFunctionParameters& p = CallFunctionParametersOf(node->op());
+  if (node->opcode() == IrOpcode::kJSCall) {
+    const CallParameters& p = CallParametersOf(node->op());
     if (p.tail_call_mode() == TailCallMode::kAllow) {
       frame_state = CreateTailCallerFrameState(node, frame_state);
     }
@@ -691,7 +730,7 @@
   DCHECK_EQ(parameter_count, start->op()->ValueOutputCount() - 5);
   if (call.formal_arguments() != parameter_count) {
     frame_state = CreateArtificialFrameState(
-        node, frame_state, call.formal_arguments(),
+        node, frame_state, call.formal_arguments(), BailoutId::None(),
         FrameStateType::kArgumentsAdaptor, shared_info);
   }
 
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
index 9bb8ec4..e40e6a7 100644
--- a/src/compiler/js-inlining.h
+++ b/src/compiler/js-inlining.h
@@ -11,7 +11,7 @@
 namespace v8 {
 namespace internal {
 
-// Forward declarations.
+class BailoutId;
 class CompilationInfo;
 
 namespace compiler {
@@ -36,7 +36,7 @@
 
   // Can be used by inlining heuristics or by testing code directly, without
   // using the above generic reducer interface of the inlining machinery.
-  Reduction ReduceJSCall(Node* node, Handle<JSFunction> function);
+  Reduction ReduceJSCall(Node* node);
 
  private:
   CommonOperatorBuilder* common() const;
@@ -50,8 +50,13 @@
   JSGraph* const jsgraph_;
   SourcePositionTable* const source_positions_;
 
+  bool DetermineCallTarget(Node* node,
+                           Handle<SharedFunctionInfo>& shared_info_out);
+  void DetermineCallContext(Node* node, Node*& context_out,
+                            Handle<FeedbackVector>& feedback_vector_out);
+
   Node* CreateArtificialFrameState(Node* node, Node* outer_frame_state,
-                                   int parameter_count,
+                                   int parameter_count, BailoutId bailout_id,
                                    FrameStateType frame_state_type,
                                    Handle<SharedFunctionInfo> shared);
 
diff --git a/src/compiler/js-intrinsic-lowering.cc b/src/compiler/js-intrinsic-lowering.cc
index 5290323..8a866ee 100644
--- a/src/compiler/js-intrinsic-lowering.cc
+++ b/src/compiler/js-intrinsic-lowering.cc
@@ -32,6 +32,8 @@
   switch (f->function_id) {
     case Runtime::kInlineCreateIterResultObject:
       return ReduceCreateIterResultObject(node);
+    case Runtime::kInlineDebugIsActive:
+      return ReduceDebugIsActive(node);
     case Runtime::kInlineDeoptimizeNow:
       return ReduceDeoptimizeNow(node);
     case Runtime::kInlineGeneratorClose:
@@ -40,12 +42,14 @@
       return ReduceGeneratorGetInputOrDebugPos(node);
     case Runtime::kInlineGeneratorGetResumeMode:
       return ReduceGeneratorGetResumeMode(node);
+    case Runtime::kInlineGeneratorGetContext:
+      return ReduceGeneratorGetContext(node);
     case Runtime::kInlineIsArray:
       return ReduceIsInstanceType(node, JS_ARRAY_TYPE);
     case Runtime::kInlineIsTypedArray:
       return ReduceIsInstanceType(node, JS_TYPED_ARRAY_TYPE);
-    case Runtime::kInlineIsRegExp:
-      return ReduceIsInstanceType(node, JS_REGEXP_TYPE);
+    case Runtime::kInlineIsJSProxy:
+      return ReduceIsInstanceType(node, JS_PROXY_TYPE);
     case Runtime::kInlineIsJSReceiver:
       return ReduceIsJSReceiver(node);
     case Runtime::kInlineIsSmi:
@@ -54,8 +58,6 @@
       return ReduceFixedArrayGet(node);
     case Runtime::kInlineFixedArraySet:
       return ReduceFixedArraySet(node);
-    case Runtime::kInlineRegExpExec:
-      return ReduceRegExpExec(node);
     case Runtime::kInlineSubString:
       return ReduceSubString(node);
     case Runtime::kInlineToInteger:
@@ -70,10 +72,29 @@
       return ReduceToString(node);
     case Runtime::kInlineCall:
       return ReduceCall(node);
-    case Runtime::kInlineNewObject:
-      return ReduceNewObject(node);
     case Runtime::kInlineGetSuperConstructor:
       return ReduceGetSuperConstructor(node);
+    case Runtime::kInlineArrayBufferViewGetByteLength:
+      return ReduceArrayBufferViewField(
+          node, AccessBuilder::ForJSArrayBufferViewByteLength());
+    case Runtime::kInlineArrayBufferViewGetByteOffset:
+      return ReduceArrayBufferViewField(
+          node, AccessBuilder::ForJSArrayBufferViewByteOffset());
+    case Runtime::kInlineMaxSmi:
+      return ReduceMaxSmi(node);
+    case Runtime::kInlineTypedArrayGetLength:
+      return ReduceArrayBufferViewField(node,
+                                        AccessBuilder::ForJSTypedArrayLength());
+    case Runtime::kInlineTypedArrayMaxSizeInHeap:
+      return ReduceTypedArrayMaxSizeInHeap(node);
+    case Runtime::kInlineJSCollectionGetTable:
+      return ReduceJSCollectionGetTable(node);
+    case Runtime::kInlineStringGetRawHashField:
+      return ReduceStringGetRawHashField(node);
+    case Runtime::kInlineTheHole:
+      return ReduceTheHole(node);
+    case Runtime::kInlineClassOf:
+      return ReduceClassOf(node);
     default:
       break;
   }
@@ -90,6 +111,15 @@
                 context, effect);
 }
 
+Reduction JSIntrinsicLowering::ReduceDebugIsActive(Node* node) {
+  Node* const value = jsgraph()->ExternalConstant(
+      ExternalReference::debug_is_active_address(isolate()));
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op =
+      simplified()->LoadField(AccessBuilder::ForExternalUint8Value());
+  return Change(node, op, value, effect, control);
+}
 
 Reduction JSIntrinsicLowering::ReduceDeoptimizeNow(Node* node) {
   if (mode() != kDeoptimizationEnabled) return NoChange();
@@ -133,6 +163,16 @@
   return Change(node, op, generator, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceGeneratorGetContext(Node* node) {
+  Node* const generator = NodeProperties::GetValueInput(node, 0);
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = NodeProperties::GetControlInput(node);
+  Operator const* const op =
+      simplified()->LoadField(AccessBuilder::ForJSGeneratorObjectContext());
+
+  return Change(node, op, generator, effect, control);
+}
+
 Reduction JSIntrinsicLowering::ReduceGeneratorGetResumeMode(Node* node) {
   Node* const generator = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
@@ -228,11 +268,6 @@
 }
 
 
-Reduction JSIntrinsicLowering::ReduceRegExpExec(Node* node) {
-  return Change(node, CodeFactory::RegExpExec(isolate()), 4);
-}
-
-
 Reduction JSIntrinsicLowering::ReduceSubString(Node* node) {
   return Change(node, CodeFactory::SubString(isolate()), 3);
 }
@@ -271,16 +306,12 @@
 Reduction JSIntrinsicLowering::ReduceCall(Node* node) {
   size_t const arity = CallRuntimeParametersOf(node->op()).arity();
   NodeProperties::ChangeOp(
-      node, javascript()->CallFunction(arity, 0.0f, VectorSlotPair(),
-                                       ConvertReceiverMode::kAny,
-                                       TailCallMode::kDisallow));
+      node,
+      javascript()->Call(arity, 0.0f, VectorSlotPair(),
+                         ConvertReceiverMode::kAny, TailCallMode::kDisallow));
   return Changed(node);
 }
 
-Reduction JSIntrinsicLowering::ReduceNewObject(Node* node) {
-  return Change(node, CodeFactory::FastNewObject(isolate()), 0);
-}
-
 Reduction JSIntrinsicLowering::ReduceGetSuperConstructor(Node* node) {
   Node* active_function = NodeProperties::GetValueInput(node, 0);
   Node* effect = NodeProperties::GetEffectInput(node);
@@ -292,6 +323,75 @@
                 active_function_map, effect, control);
 }
 
+Reduction JSIntrinsicLowering::ReduceArrayBufferViewField(
+    Node* node, FieldAccess const& access) {
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Load the {receiver}s field.
+  Node* value = effect = graph()->NewNode(simplified()->LoadField(access),
+                                          receiver, effect, control);
+
+  // Check if the {receiver}s buffer was neutered.
+  Node* receiver_buffer = effect = graph()->NewNode(
+      simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+      receiver, effect, control);
+  Node* check = effect = graph()->NewNode(
+      simplified()->ArrayBufferWasNeutered(), receiver_buffer, effect, control);
+
+  // Default to zero if the {receiver}s buffer was neutered.
+  value = graph()->NewNode(
+      common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+      check, jsgraph()->ZeroConstant(), value);
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
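
// What the Select above computes, as a stand-alone sketch: byte_length /
// byte_offset / length loads from an ArrayBufferView yield 0 once the
// backing buffer has been neutered. Field names are illustrative.
#include <cstddef>

struct ArrayBufferViewSketch {
  std::size_t byte_length = 0;
  bool buffer_was_neutered = false;
};

std::size_t LoadByteLength(const ArrayBufferViewSketch& view) {
  return view.buffer_was_neutered ? 0 : view.byte_length;
}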
+
+Reduction JSIntrinsicLowering::ReduceMaxSmi(Node* node) {
+  Node* value = jsgraph()->Constant(Smi::kMaxValue);
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceTypedArrayMaxSizeInHeap(Node* node) {
+  Node* value = jsgraph()->Constant(FLAG_typed_array_max_size_in_heap);
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceJSCollectionGetTable(Node* node) {
+  Node* collection = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  return Change(node,
+                simplified()->LoadField(AccessBuilder::ForJSCollectionTable()),
+                collection, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceStringGetRawHashField(Node* node) {
+  Node* string = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+  return Change(node,
+                simplified()->LoadField(AccessBuilder::ForNameHashField()),
+                string, effect, control);
+}
+
+Reduction JSIntrinsicLowering::ReduceTheHole(Node* node) {
+  Node* value = jsgraph()->TheHoleConstant();
+  ReplaceWithValue(node, value);
+  return Replace(value);
+}
+
+Reduction JSIntrinsicLowering::ReduceClassOf(Node* node) {
+  RelaxEffectsAndControls(node);
+  node->TrimInputCount(2);
+  NodeProperties::ChangeOp(node, javascript()->ClassOf());
+  return Changed(node);
+}
+
 Reduction JSIntrinsicLowering::Change(Node* node, const Operator* op, Node* a,
                                       Node* b) {
   RelaxControls(node);
diff --git a/src/compiler/js-intrinsic-lowering.h b/src/compiler/js-intrinsic-lowering.h
index 6e984ff..f3e3e2a 100644
--- a/src/compiler/js-intrinsic-lowering.h
+++ b/src/compiler/js-intrinsic-lowering.h
@@ -21,6 +21,7 @@
 
 // Forward declarations.
 class CommonOperatorBuilder;
+struct FieldAccess;
 class JSOperatorBuilder;
 class JSGraph;
 class SimplifiedOperatorBuilder;
@@ -40,8 +41,10 @@
 
  private:
   Reduction ReduceCreateIterResultObject(Node* node);
+  Reduction ReduceDebugIsActive(Node* node);
   Reduction ReduceDeoptimizeNow(Node* node);
   Reduction ReduceGeneratorClose(Node* node);
+  Reduction ReduceGeneratorGetContext(Node* node);
   Reduction ReduceGeneratorGetInputOrDebugPos(Node* node);
   Reduction ReduceGeneratorGetResumeMode(Node* node);
   Reduction ReduceIsInstanceType(Node* node, InstanceType instance_type);
@@ -49,7 +52,6 @@
   Reduction ReduceIsSmi(Node* node);
   Reduction ReduceFixedArrayGet(Node* node);
   Reduction ReduceFixedArraySet(Node* node);
-  Reduction ReduceRegExpExec(Node* node);
   Reduction ReduceSubString(Node* node);
   Reduction ReduceToInteger(Node* node);
   Reduction ReduceToLength(Node* node);
@@ -57,9 +59,24 @@
   Reduction ReduceToObject(Node* node);
   Reduction ReduceToString(Node* node);
   Reduction ReduceCall(Node* node);
-  Reduction ReduceNewObject(Node* node);
   Reduction ReduceGetSuperConstructor(Node* node);
 
+  // TODO(turbofan): typedarray.js support; drop once TypedArrays are
+  // converted to proper CodeStubAssembler based builtins.
+  Reduction ReduceArrayBufferViewField(Node* node, FieldAccess const& access);
+  Reduction ReduceMaxSmi(Node* node);
+  Reduction ReduceTypedArrayMaxSizeInHeap(Node* node);
+
+  // TODO(turbofan): collection.js support; drop once Maps and Sets are
+  // converted to proper CodeStubAssembler based builtins.
+  Reduction ReduceJSCollectionGetTable(Node* node);
+  Reduction ReduceStringGetRawHashField(Node* node);
+  Reduction ReduceTheHole(Node* node);
+
+  // TODO(turbofan): JavaScript builtins support; drop once all uses of
+  // %_ClassOf in JavaScript builtins are eliminated.
+  Reduction ReduceClassOf(Node* node);
+
   Reduction Change(Node* node, const Operator* op);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b);
   Reduction Change(Node* node, const Operator* op, Node* a, Node* b, Node* c);
diff --git a/src/compiler/js-native-context-specialization.cc b/src/compiler/js-native-context-specialization.cc
index a849fec..c32ee26 100644
--- a/src/compiler/js-native-context-specialization.cc
+++ b/src/compiler/js-native-context-specialization.cc
@@ -14,9 +14,9 @@
 #include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/type-cache.h"
+#include "src/feedback-vector.h"
 #include "src/field-index-inl.h"
 #include "src/isolate-inl.h"
-#include "src/type-feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -55,6 +55,12 @@
 
 }  // namespace
 
+struct JSNativeContextSpecialization::ScriptContextTableLookupResult {
+  Handle<Context> context;
+  bool immutable;
+  int index;
+};
+
 JSNativeContextSpecialization::JSNativeContextSpecialization(
     Editor* editor, JSGraph* jsgraph, Flags flags,
     Handle<Context> native_context, CompilationDependencies* dependencies,
@@ -62,6 +68,8 @@
     : AdvancedReducer(editor),
       jsgraph_(jsgraph),
       flags_(flags),
+      global_object_(native_context->global_object()),
+      global_proxy_(JSGlobalProxy::cast(native_context->global_proxy())),
       native_context_(native_context),
       dependencies_(dependencies),
       zone_(zone),
@@ -69,10 +77,20 @@
 
 Reduction JSNativeContextSpecialization::Reduce(Node* node) {
   switch (node->opcode()) {
+    case IrOpcode::kJSAdd:
+      return ReduceJSAdd(node);
+    case IrOpcode::kJSGetSuperConstructor:
+      return ReduceJSGetSuperConstructor(node);
     case IrOpcode::kJSInstanceOf:
       return ReduceJSInstanceOf(node);
+    case IrOpcode::kJSOrdinaryHasInstance:
+      return ReduceJSOrdinaryHasInstance(node);
     case IrOpcode::kJSLoadContext:
       return ReduceJSLoadContext(node);
+    case IrOpcode::kJSLoadGlobal:
+      return ReduceJSLoadGlobal(node);
+    case IrOpcode::kJSStoreGlobal:
+      return ReduceJSStoreGlobal(node);
     case IrOpcode::kJSLoadNamed:
       return ReduceJSLoadNamed(node);
     case IrOpcode::kJSStoreNamed:
@@ -81,12 +99,75 @@
       return ReduceJSLoadProperty(node);
     case IrOpcode::kJSStoreProperty:
       return ReduceJSStoreProperty(node);
+    case IrOpcode::kJSStoreNamedOwn:
+      return ReduceJSStoreNamedOwn(node);
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
+      return ReduceJSStoreDataPropertyInLiteral(node);
     default:
       break;
   }
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSAdd(Node* node) {
+  // TODO(turbofan): This has to run together with the inlining and
+  // native context specialization to be able to leverage the string
+  // constant-folding for optimizing property access, but we should
+  // nevertheless find a better home for this at some point.
+  DCHECK_EQ(IrOpcode::kJSAdd, node->opcode());
+
+  // Constant-fold string concatenation.
+  HeapObjectBinopMatcher m(node);
+  if (m.left().HasValue() && m.left().Value()->IsString() &&
+      m.right().HasValue() && m.right().Value()->IsString()) {
+    Handle<String> left = Handle<String>::cast(m.left().Value());
+    Handle<String> right = Handle<String>::cast(m.right().Value());
+    if (left->length() + right->length() <= String::kMaxLength) {
+      Handle<String> result =
+          factory()->NewConsString(left, right).ToHandleChecked();
+      Node* value = jsgraph()->HeapConstant(result);
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    }
+  }
+  return NoChange();
+}
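
// The fold in ReduceJSAdd above, modeled stand-alone: two string constants
// may be concatenated at compile time only while the result stays within
// the maximum string length. kMaxLengthSketch is an illustrative stand-in
// for String::kMaxLength, not the real limit.
#include <cstddef>
#include <optional>
#include <string>

constexpr std::size_t kMaxLengthSketch = (std::size_t{1} << 28) - 16;

std::optional<std::string> TryFoldStringAdd(const std::string& left,
                                            const std::string& right) {
  if (left.size() + right.size() > kMaxLengthSketch) return std::nullopt;
  return left + right;  // Safe to replace the JSAdd with a constant.
}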
+
+Reduction JSNativeContextSpecialization::ReduceJSGetSuperConstructor(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSGetSuperConstructor, node->opcode());
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+
+  // If deoptimization is disabled, we cannot optimize.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Check if the input is a known JSFunction.
+  HeapObjectMatcher m(constructor);
+  if (!m.HasValue()) return NoChange();
+  Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value());
+  Handle<Map> function_map(function->map(), isolate());
+  Handle<Object> function_prototype(function_map->prototype(), isolate());
+
+  // We can constant-fold the super constructor access if the
+  // {function}s map is stable, i.e. we can use a code dependency
+  // to guard against [[Prototype]] changes of {function}.
+  if (function_map->is_stable()) {
+    Node* value = jsgraph()->Constant(function_prototype);
+    dependencies()->AssumeMapStable(function_map);
+    if (function_prototype->IsConstructor()) {
+      ReplaceWithValue(node, value);
+      return Replace(value);
+    } else {
+      node->InsertInput(graph()->zone(), 0, value);
+      NodeProperties::ChangeOp(
+          node, javascript()->CallRuntime(Runtime::kThrowNotSuperConstructor));
+      return Changed(node);
+    }
+  }
+
+  return NoChange();
+}
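
// The fold above as a stand-alone decision function (illustrative types):
// with a stable function map the super constructor is just the map's
// prototype, and a non-constructor prototype turns the node into a throwing
// runtime call instead.
#include <optional>

struct FunctionSketch {
  bool map_is_stable = false;
  bool prototype_is_constructor = false;
};

enum class SuperCtorLowering { kFoldToPrototype, kThrowNotSuperConstructor };

std::optional<SuperCtorLowering> LowerGetSuperConstructor(
    const FunctionSketch& function) {
  if (!function.map_is_stable) return std::nullopt;  // No safe dependency.
  return function.prototype_is_constructor
             ? SuperCtorLowering::kFoldToPrototype
             : SuperCtorLowering::kThrowNotSuperConstructor;
}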
+
 Reduction JSNativeContextSpecialization::ReduceJSInstanceOf(Node* node) {
   DCHECK_EQ(IrOpcode::kJSInstanceOf, node->opcode());
   Node* object = NodeProperties::GetValueInput(node, 0);
@@ -125,15 +206,16 @@
       }
 
       // Monomorphic property access.
-      effect =
-          BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+      effect = BuildCheckMaps(constructor, effect, control,
+                              access_info.receiver_maps());
 
       // Lower to OrdinaryHasInstance(C, O).
       NodeProperties::ReplaceValueInput(node, constructor, 0);
       NodeProperties::ReplaceValueInput(node, object, 1);
       NodeProperties::ReplaceEffectInput(node, effect);
       NodeProperties::ChangeOp(node, javascript()->OrdinaryHasInstance());
-      return Changed(node);
+      Reduction const reduction = ReduceJSOrdinaryHasInstance(node);
+      return reduction.Changed() ? reduction : Changed(node);
     }
   } else if (access_info.IsDataConstant()) {
     DCHECK(access_info.constant()->IsCallable());
@@ -145,8 +227,8 @@
     }
 
     // Monomorphic property access.
-    effect =
-        BuildCheckMaps(constructor, effect, control, MapList{receiver_map});
+    effect = BuildCheckMaps(constructor, effect, control,
+                            access_info.receiver_maps());
 
     // Call the @@hasInstance handler.
     Node* target = jsgraph()->Constant(access_info.constant());
@@ -156,8 +238,8 @@
     node->ReplaceInput(5, effect);
     NodeProperties::ChangeOp(
         node,
-        javascript()->CallFunction(3, 0.0f, VectorSlotPair(),
-                                   ConvertReceiverMode::kNotNullOrUndefined));
+        javascript()->Call(3, 0.0f, VectorSlotPair(),
+                           ConvertReceiverMode::kNotNullOrUndefined));
 
     // Rewire the value uses of {node} to ToBoolean conversion of the result.
     Node* value = graph()->NewNode(javascript()->ToBoolean(ToBooleanHint::kAny),
@@ -174,6 +256,31 @@
   return NoChange();
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSOrdinaryHasInstance, node->opcode());
+  Node* constructor = NodeProperties::GetValueInput(node, 0);
+  Node* object = NodeProperties::GetValueInput(node, 1);
+
+  // Check if the {constructor} is a JSBoundFunction.
+  HeapObjectMatcher m(constructor);
+  if (m.HasValue() && m.Value()->IsJSBoundFunction()) {
+    // OrdinaryHasInstance on bound functions turns into a recursive
+    // invocation of the instanceof operator again.
+    // ES6 section 7.3.19 OrdinaryHasInstance (C, O) step 2.
+    Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(m.Value());
+    Handle<JSReceiver> bound_target_function(function->bound_target_function());
+    NodeProperties::ReplaceValueInput(node, object, 0);
+    NodeProperties::ReplaceValueInput(
+        node, jsgraph()->HeapConstant(bound_target_function), 1);
+    NodeProperties::ChangeOp(node, javascript()->InstanceOf());
+    Reduction const reduction = ReduceJSInstanceOf(node);
+    return reduction.Changed() ? reduction : Changed(node);
+  }
+
+  return NoChange();
+}
+
 Reduction JSNativeContextSpecialization::ReduceJSLoadContext(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
@@ -188,24 +295,292 @@
   return NoChange();
 }
 
+namespace {
+
+FieldAccess ForPropertyCellValue(MachineRepresentation representation,
+                                 Type* type, MaybeHandle<Map> map,
+                                 Handle<Name> name) {
+  WriteBarrierKind kind = kFullWriteBarrier;
+  if (representation == MachineRepresentation::kTaggedSigned) {
+    kind = kNoWriteBarrier;
+  } else if (representation == MachineRepresentation::kTaggedPointer) {
+    kind = kPointerWriteBarrier;
+  }
+  MachineType r = MachineType::TypeForRepresentation(representation);
+  FieldAccess access = {
+      kTaggedBase, PropertyCell::kValueOffset, name, map, type, r, kind};
+  return access;
+}
+
+}  // namespace
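
// The barrier choice encoded by ForPropertyCellValue above, extracted into
// a stand-alone helper (enums are stand-ins for the V8 types of the same
// names): Smi-only values need no write barrier, known heap pointers need
// only the pointer barrier, everything else keeps the full barrier.
enum class RepresentationSketch { kTaggedSigned, kTaggedPointer, kTagged };
enum class WriteBarrierSketch { kNone, kPointer, kFull };

WriteBarrierSketch BarrierForRepresentation(RepresentationSketch r) {
  switch (r) {
    case RepresentationSketch::kTaggedSigned:
      return WriteBarrierSketch::kNone;     // Smis are not heap pointers.
    case RepresentationSketch::kTaggedPointer:
      return WriteBarrierSketch::kPointer;  // Pointer, but never a Smi.
    default:
      return WriteBarrierSketch::kFull;     // Could be either; be safe.
  }
}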
+
+Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
+    Node* node, Node* receiver, Node* value, Handle<Name> name,
+    AccessMode access_mode, Node* index) {
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Lookup on the global object. We only deal with own data properties
+  // of the global object here (represented as PropertyCell).
+  LookupIterator it(global_object(), name, LookupIterator::OWN);
+  it.TryLookupCachedProperty();
+  if (it.state() != LookupIterator::DATA) return NoChange();
+  if (!it.GetHolder<JSObject>()->IsJSGlobalObject()) return NoChange();
+  Handle<PropertyCell> property_cell = it.GetPropertyCell();
+  PropertyDetails property_details = property_cell->property_details();
+  Handle<Object> property_cell_value(property_cell->value(), isolate());
+  PropertyCellType property_cell_type = property_details.cell_type();
+
+  // We have additional constraints for stores.
+  if (access_mode == AccessMode::kStore) {
+    if (property_details.IsReadOnly()) {
+      // Don't even bother trying to lower stores to read-only data properties.
+      return NoChange();
+    } else if (property_cell_type == PropertyCellType::kUndefined) {
+      // There's no fast-path for dealing with undefined property cells.
+      return NoChange();
+    } else if (property_cell_type == PropertyCellType::kConstantType) {
+      // There's also no fast-path to store to a global cell which pretended
+      // to be stable, but is no longer stable now.
+      if (property_cell_value->IsHeapObject() &&
+          !Handle<HeapObject>::cast(property_cell_value)->map()->is_stable()) {
+        return NoChange();
+      }
+    }
+  }
+
+  // Ensure that {index} matches the specified {name} (if {index} is given).
+  if (index != nullptr) {
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), index,
+                                   jsgraph()->HeapConstant(name));
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  }
+
+  // Check if we have a {receiver} to validate. If so, we need to check that
+  // the {receiver} is actually the JSGlobalProxy for the native context that
+  // we are specializing to.
+  if (receiver != nullptr) {
+    Node* check = graph()->NewNode(simplified()->ReferenceEqual(), receiver,
+                                   jsgraph()->HeapConstant(global_proxy()));
+    effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+  }
+
+  if (access_mode == AccessMode::kLoad) {
+    // A load from a non-configurable, read-only data property on the global
+    // object can be constant-folded, even without deoptimization support.
+    if (!property_details.IsConfigurable() && property_details.IsReadOnly()) {
+      value = jsgraph()->Constant(property_cell_value);
+    } else {
+      // Record a code dependency on the cell if we can benefit from the
+      // additional feedback, or the global property is configurable (i.e.
+      // can be deleted or reconfigured to an accessor property).
+      if (property_details.cell_type() != PropertyCellType::kMutable ||
+          property_details.IsConfigurable()) {
+        dependencies()->AssumePropertyCell(property_cell);
+      }
+
+      // A load from a constant/undefined global property can be constant-folded.
+      if (property_details.cell_type() == PropertyCellType::kConstant ||
+          property_details.cell_type() == PropertyCellType::kUndefined) {
+        value = jsgraph()->Constant(property_cell_value);
+      } else {
+        // A load from a constant-type cell can benefit from type feedback.
+        MaybeHandle<Map> map;
+        Type* property_cell_value_type = Type::NonInternal();
+        MachineRepresentation representation = MachineRepresentation::kTagged;
+        if (property_details.cell_type() == PropertyCellType::kConstantType) {
+          // Compute proper type based on the current value in the cell.
+          if (property_cell_value->IsSmi()) {
+            property_cell_value_type = Type::SignedSmall();
+            representation = MachineRepresentation::kTaggedSigned;
+          } else if (property_cell_value->IsNumber()) {
+            property_cell_value_type = Type::Number();
+            representation = MachineRepresentation::kTaggedPointer;
+          } else {
+            Handle<Map> property_cell_value_map(
+                Handle<HeapObject>::cast(property_cell_value)->map(),
+                isolate());
+            property_cell_value_type = Type::For(property_cell_value_map);
+            representation = MachineRepresentation::kTaggedPointer;
+
+            // We can only use the property cell value map for map check
+            // elimination if it's stable, i.e. the HeapObject wasn't
+            // mutated without the cell state being updated.
+            if (property_cell_value_map->is_stable()) {
+              dependencies()->AssumeMapStable(property_cell_value_map);
+              map = property_cell_value_map;
+            }
+          }
+        }
+        value = effect = graph()->NewNode(
+            simplified()->LoadField(ForPropertyCellValue(
+                representation, property_cell_value_type, map, name)),
+            jsgraph()->HeapConstant(property_cell), effect, control);
+      }
+    }
+  } else {
+    DCHECK_EQ(AccessMode::kStore, access_mode);
+    DCHECK(!property_details.IsReadOnly());
+    switch (property_details.cell_type()) {
+      case PropertyCellType::kUndefined: {
+        UNREACHABLE();
+        break;
+      }
+      case PropertyCellType::kConstant: {
+        // Record a code dependency on the cell, and just deoptimize if the new
+        // value doesn't match the previous value stored inside the cell.
+        dependencies()->AssumePropertyCell(property_cell);
+        Node* check =
+            graph()->NewNode(simplified()->ReferenceEqual(), value,
+                             jsgraph()->Constant(property_cell_value));
+        effect =
+            graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+        break;
+      }
+      case PropertyCellType::kConstantType: {
+        // Record a code dependency on the cell, and just deoptimize if the new
+        // value's type doesn't match the type of the previous value in the
+        // cell.
+        dependencies()->AssumePropertyCell(property_cell);
+        Type* property_cell_value_type;
+        MachineRepresentation representation = MachineRepresentation::kTagged;
+        if (property_cell_value->IsHeapObject()) {
+          // We cannot do anything if the {property_cell_value}s map is no
+          // longer stable.
+          Handle<Map> property_cell_value_map(
+              Handle<HeapObject>::cast(property_cell_value)->map(), isolate());
+          DCHECK(property_cell_value_map->is_stable());
+          dependencies()->AssumeMapStable(property_cell_value_map);
+
+          // Check that the {value} is a HeapObject.
+          value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                            value, effect, control);
+
+          // Check the {value} map against the {property_cell} map.
+          effect =
+              graph()->NewNode(simplified()->CheckMaps(
+                                   CheckMapsFlag::kNone,
+                                   ZoneHandleSet<Map>(property_cell_value_map)),
+                               value, effect, control);
+          property_cell_value_type = Type::OtherInternal();
+          representation = MachineRepresentation::kTaggedPointer;
+        } else {
+          // Check that the {value} is a Smi.
+          value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+                                            effect, control);
+          property_cell_value_type = Type::SignedSmall();
+          representation = MachineRepresentation::kTaggedSigned;
+        }
+        effect = graph()->NewNode(simplified()->StoreField(ForPropertyCellValue(
+                                      representation, property_cell_value_type,
+                                      MaybeHandle<Map>(), name)),
+                                  jsgraph()->HeapConstant(property_cell), value,
+                                  effect, control);
+        break;
+      }
+      case PropertyCellType::kMutable: {
+        // Record a code dependency on the cell, and just deoptimize if the
+        // property ever becomes read-only.
+        dependencies()->AssumePropertyCell(property_cell);
+        effect = graph()->NewNode(
+            simplified()->StoreField(ForPropertyCellValue(
+                MachineRepresentation::kTagged, Type::NonInternal(),
+                MaybeHandle<Map>(), name)),
+            jsgraph()->HeapConstant(property_cell), value, effect, control);
+        break;
+      }
+    }
+  }
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
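
// The store side of ReduceGlobalAccess above, condensed into a stand-alone
// sketch (illustrative enums, not V8's): each property cell type dictates
// which check guards the lowered store, and read-only or undefined cells
// are never lowered at all.
#include <optional>

enum class CellTypeSketch { kUndefined, kConstant, kConstantType, kMutable };
enum class StoreLoweringSketch {
  kDeoptUnlessSameValue,  // kConstant: new value must equal the cell value.
  kDeoptUnlessSameType,   // kConstantType: new value must match the cell type.
  kPlainStore             // kMutable: store; deopt if the cell goes read-only.
};

std::optional<StoreLoweringSketch> LowerGlobalStore(CellTypeSketch type,
                                                    bool read_only) {
  if (read_only) return std::nullopt;  // Don't even bother.
  switch (type) {
    case CellTypeSketch::kUndefined:
      return std::nullopt;  // No fast path for undefined cells.
    case CellTypeSketch::kConstant:
      return StoreLoweringSketch::kDeoptUnlessSameValue;
    case CellTypeSketch::kConstantType:
      return StoreLoweringSketch::kDeoptUnlessSameType;
    case CellTypeSketch::kMutable:
      return StoreLoweringSketch::kPlainStore;
  }
  return std::nullopt;
}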
+
+Reduction JSNativeContextSpecialization::ReduceJSLoadGlobal(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadGlobal, node->opcode());
+  Handle<Name> name = LoadGlobalParametersOf(node->op()).name();
+  Node* effect = NodeProperties::GetEffectInput(node);
+
+  // Try to look up the name on the script context table first (lexical scoping).
+  ScriptContextTableLookupResult result;
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+    Node* context = jsgraph()->HeapConstant(result.context);
+    Node* value = effect = graph()->NewNode(
+        javascript()->LoadContext(0, result.index, result.immutable), context,
+        effect);
+    ReplaceWithValue(node, value, effect);
+    return Replace(value);
+  }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Lookup the {name} on the global object instead.
+  return ReduceGlobalAccess(node, nullptr, nullptr, name, AccessMode::kLoad);
+}
+
+Reduction JSNativeContextSpecialization::ReduceJSStoreGlobal(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreGlobal, node->opcode());
+  Handle<Name> name = StoreGlobalParametersOf(node->op()).name();
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Try to look up the name on the script context table first (lexical scoping).
+  ScriptContextTableLookupResult result;
+  if (LookupInScriptContextTable(name, &result)) {
+    if (result.context->is_the_hole(isolate(), result.index)) return NoChange();
+    if (result.immutable) return NoChange();
+    Node* context = jsgraph()->HeapConstant(result.context);
+    effect = graph()->NewNode(javascript()->StoreContext(0, result.index),
+                              value, context, effect, control);
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
+  // Not much we can do if deoptimization support is disabled.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  // Lookup the {name} on the global object instead.
+  return ReduceGlobalAccess(node, nullptr, value, name, AccessMode::kStore);
+}
+
 Reduction JSNativeContextSpecialization::ReduceNamedAccess(
     Node* node, Node* value, MapHandleList const& receiver_maps,
     Handle<Name> name, AccessMode access_mode, LanguageMode language_mode,
-    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot, Node* index) {
+    Handle<FeedbackVector> vector, FeedbackSlot slot, Node* index) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
          node->opcode() == IrOpcode::kJSStoreNamed ||
          node->opcode() == IrOpcode::kJSLoadProperty ||
-         node->opcode() == IrOpcode::kJSStoreProperty);
+         node->opcode() == IrOpcode::kJSStoreProperty ||
+         node->opcode() == IrOpcode::kJSStoreNamedOwn);
   Node* receiver = NodeProperties::GetValueInput(node, 0);
   Node* context = NodeProperties::GetContextInput(node);
-  Node* frame_state_eager = NodeProperties::FindFrameStateBefore(node);
-  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
   // Not much we can do if deoptimization support is disabled.
   if (!(flags() & kDeoptimizationEnabled)) return NoChange();
 
+  // Check if we have an access o.x or o.x=v where o is the current
+  // native context's global proxy, and turn that into a direct access
+  // to the current native context's global object instead.
+  if (receiver_maps.length() == 1) {
+    Handle<Map> receiver_map = receiver_maps.first();
+    if (receiver_map->IsJSGlobalProxyMap()) {
+      Object* maybe_constructor = receiver_map->GetConstructor();
+      // Detached global proxies have |null| as their constructor.
+      if (maybe_constructor->IsJSFunction() &&
+          JSFunction::cast(maybe_constructor)->native_context() ==
+              *native_context()) {
+        return ReduceGlobalAccess(node, receiver, value, name, access_mode,
+                                  index);
+      }
+    }
+  }
+
   // Compute property access infos for the receiver maps.
   AccessInfoFactory access_info_factory(dependencies(), native_context(),
                                         graph()->zone());
@@ -217,7 +592,7 @@
 
   // TODO(turbofan): Add support for inlining into try blocks.
   bool is_exceptional = NodeProperties::IsExceptionalCall(node);
-  for (auto access_info : access_infos) {
+  for (const auto& access_info : access_infos) {
     if (access_info.IsAccessorConstant()) {
       // Accessors in try-blocks are not supported yet.
       if (is_exceptional || !(flags() & kAccessorInliningEnabled)) {
@@ -227,7 +602,7 @@
       // We do not handle generic calls in try blocks.
       if (is_exceptional) return NoChange();
       // We only handle the generic store IC case.
-      if (vector->GetKind(slot) != FeedbackVectorSlotKind::STORE_IC) {
+      if (!vector->IsStoreIC(slot)) {
         return NoChange();
       }
     }
@@ -260,15 +635,14 @@
                                            receiver, effect, control);
     } else {
       // Monomorphic property access.
-      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                           receiver, effect, control);
+      receiver = BuildCheckHeapObject(receiver, &effect, control);
       effect = BuildCheckMaps(receiver, effect, control,
                               access_info.receiver_maps());
     }
 
     // Generate the actual property access.
     ValueEffectControl continuation = BuildPropertyAccess(
-        receiver, value, context, frame_state_lazy, effect, control, name,
+        receiver, value, context, frame_state, effect, control, name,
         access_info, access_mode, language_mode, vector, slot);
     value = continuation.value();
     effect = continuation.effect();
@@ -299,8 +673,7 @@
       receiverissmi_control = graph()->NewNode(common()->IfTrue(), branch);
       receiverissmi_effect = effect;
     } else {
-      receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                           receiver, effect, control);
+      receiver = BuildCheckHeapObject(receiver, &effect, control);
     }
 
     // Load the {receiver} map. The resulting effect is the dominating effect
@@ -369,20 +742,14 @@
           this_effect =
               graph()->NewNode(common()->EffectPhi(this_control_count),
                                this_control_count + 1, &this_effects.front());
-
-          // TODO(turbofan): The effect/control linearization will not find a
-          // FrameState after the EffectPhi that is generated above.
-          this_effect =
-              graph()->NewNode(common()->Checkpoint(), frame_state_eager,
-                               this_effect, this_control);
         }
       }
 
       // Generate the actual property access.
-      ValueEffectControl continuation = BuildPropertyAccess(
-          this_receiver, this_value, context, frame_state_lazy, this_effect,
-          this_control, name, access_info, access_mode, language_mode, vector,
-          slot);
+      ValueEffectControl continuation =
+          BuildPropertyAccess(this_receiver, this_value, context, frame_state,
+                              this_effect, this_control, name, access_info,
+                              access_mode, language_mode, vector, slot);
       values.push_back(continuation.value());
       effects.push_back(continuation.effect());
       controls.push_back(continuation.control());
@@ -418,10 +785,20 @@
     Node* node, Node* value, FeedbackNexus const& nexus, Handle<Name> name,
     AccessMode access_mode, LanguageMode language_mode) {
   DCHECK(node->opcode() == IrOpcode::kJSLoadNamed ||
-         node->opcode() == IrOpcode::kJSStoreNamed);
+         node->opcode() == IrOpcode::kJSStoreNamed ||
+         node->opcode() == IrOpcode::kJSStoreNamedOwn);
   Node* const receiver = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
 
+  if (flags() & kDeoptimizationEnabled) {
+    // Check if we are accessing the current native context's global proxy.
+    HeapObjectMatcher m(receiver);
+    if (m.HasValue() && m.Value().is_identical_to(global_proxy())) {
+      // Optimize accesses to the current native context's global proxy.
+      return ReduceGlobalAccess(node, nullptr, value, name, access_mode);
+    }
+  }
+
   // Check if the {nexus} reports type feedback for the IC.
   if (nexus.IsUninitialized()) {
     if ((flags() & kDeoptimizationEnabled) &&
@@ -452,7 +829,6 @@
                            language_mode, nexus.vector_handle(), nexus.slot());
 }
 
-
 Reduction JSNativeContextSpecialization::ReduceJSLoadNamed(Node* node) {
   DCHECK_EQ(IrOpcode::kJSLoadNamed, node->opcode());
   NamedAccess const& p = NamedAccessOf(node->op());
@@ -514,6 +890,19 @@
                                     AccessMode::kStore, p.language_mode());
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSStoreNamedOwn(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, node->opcode());
+  StoreNamedOwnParameters const& p = StoreNamedOwnParametersOf(node->op());
+  Node* const value = NodeProperties::GetValueInput(node, 1);
+
+  // Extract receiver maps from the IC using the StoreOwnICNexus.
+  if (!p.feedback().IsValid()) return NoChange();
+  StoreOwnICNexus nexus(p.feedback().vector(), p.feedback().slot());
+
+  // Try to lower the creation of a named property based on the {receiver_maps}.
+  return ReduceNamedAccessFromNexus(node, value, nexus, p.name(),
+                                    AccessMode::kStoreInLiteral, STRICT);
+}
 
 Reduction JSNativeContextSpecialization::ReduceElementAccess(
     Node* node, Node* index, Node* value, MapHandleList const& receiver_maps,
@@ -547,12 +936,9 @@
     index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                       length, effect, control);
 
-    // Load the character from the {receiver}.
-    value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver, index,
+    // Return the character from the {receiver} as a single-character string.
+    value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
                              control);
-
-    // Return it as a single character string.
-    value = graph()->NewNode(simplified()->StringFromCharCode(), value);
   } else {
     // Retrieve the native context from the given {node}.
     // Compute element access infos for the receiver maps.
@@ -609,8 +995,7 @@
     }
 
     // Ensure that {receiver} is a heap object.
-    receiver = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                         receiver, effect, control);
+    receiver = BuildCheckHeapObject(receiver, &effect, control);
 
     // Check for the monomorphic case.
     if (access_infos.size() == 1) {
@@ -621,13 +1006,13 @@
         Handle<Map> const transition_source = transition.first;
         Handle<Map> const transition_target = transition.second;
         effect = graph()->NewNode(
-            simplified()->TransitionElementsKind(
+            simplified()->TransitionElementsKind(ElementsTransition(
                 IsSimpleMapChangeTransition(transition_source->elements_kind(),
                                             transition_target->elements_kind())
                     ? ElementsTransition::kFastTransition
-                    : ElementsTransition::kSlowTransition),
-            receiver, jsgraph()->HeapConstant(transition_source),
-            jsgraph()->HeapConstant(transition_target), effect, control);
+                    : ElementsTransition::kSlowTransition,
+                transition_source, transition_target)),
+            receiver, effect, control);
       }
 
       // TODO(turbofan): The effect/control linearization will not find a
@@ -672,14 +1057,13 @@
           Handle<Map> const transition_target = transition.second;
           this_effect = graph()->NewNode(
               simplified()->TransitionElementsKind(
-                  IsSimpleMapChangeTransition(
-                      transition_source->elements_kind(),
-                      transition_target->elements_kind())
-                      ? ElementsTransition::kFastTransition
-                      : ElementsTransition::kSlowTransition),
-              receiver, jsgraph()->HeapConstant(transition_source),
-              jsgraph()->HeapConstant(transition_target), this_effect,
-              this_control);
+                  ElementsTransition(IsSimpleMapChangeTransition(
+                                         transition_source->elements_kind(),
+                                         transition_target->elements_kind())
+                                         ? ElementsTransition::kFastTransition
+                                         : ElementsTransition::kSlowTransition,
+                                     transition_source, transition_target)),
+              receiver, this_effect, this_control);
         }
 
         // Load the {receiver} map.
@@ -723,11 +1107,6 @@
             this_effect =
                 graph()->NewNode(common()->EffectPhi(this_control_count),
                                  this_control_count + 1, &this_effects.front());
-
-            // TODO(turbofan): The effect/control linearization will not find a
-            // FrameState after the EffectPhi that is generated above.
-            this_effect = graph()->NewNode(common()->Checkpoint(), frame_state,
-                                           this_effect, this_control);
           }
         }
 
@@ -806,12 +1185,9 @@
         index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                           length, effect, control);
 
-        // Load the character from the {receiver}.
-        value = graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
-                                 index, control);
-
-        // Return it as a single character string.
-        value = graph()->NewNode(simplified()->StringFromCharCode(), value);
+        // Return the character from the {receiver} as a single-character string.
+        value = graph()->NewNode(simplified()->StringCharAt(), receiver, index,
+                                 control);
         ReplaceWithValue(node, value, effect, control);
         return Replace(value);
       }
@@ -944,10 +1320,11 @@
     Node* receiver, Node* value, Node* context, Node* frame_state, Node* effect,
     Node* control, Handle<Name> name, PropertyAccessInfo const& access_info,
     AccessMode access_mode, LanguageMode language_mode,
-    Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot) {
+    Handle<FeedbackVector> vector, FeedbackSlot slot) {
   // Determine actual holder and perform prototype chain checks.
   Handle<JSObject> holder;
   if (access_info.holder().ToHandle(&holder)) {
+    DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
     AssumePrototypesStable(access_info.receiver_maps(), holder);
   }
 
@@ -981,16 +1358,16 @@
             common()->FrameState(BailoutId::None(),
                                  OutputFrameStateCombine::Ignore(),
                                  frame_info0),
-            graph()->NewNode(common()->StateValues(1), receiver),
+            graph()->NewNode(common()->StateValues(1, SparseInputMask::Dense()),
+                             receiver),
             jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
             context, target, frame_state);
 
         // Introduce the call to the getter function.
         if (access_info.constant()->IsJSFunction()) {
           value = effect = graph()->NewNode(
-              javascript()->CallFunction(
-                  2, 0.0f, VectorSlotPair(),
-                  ConvertReceiverMode::kNotNullOrUndefined),
+              javascript()->Call(2, 0.0f, VectorSlotPair(),
+                                 ConvertReceiverMode::kNotNullOrUndefined),
               target, receiver, context, frame_state0, effect, control);
           control = graph()->NewNode(common()->IfSuccess(), value);
         } else {
@@ -998,16 +1375,16 @@
           Handle<FunctionTemplateInfo> function_template_info(
               Handle<FunctionTemplateInfo>::cast(access_info.constant()));
           DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
-          ZoneVector<Node*> stack_parameters(graph()->zone());
           ValueEffectControl value_effect_control = InlineApiCall(
-              receiver, context, target, frame_state0, &stack_parameters,
-              effect, control, shared_info, function_template_info);
+              receiver, context, target, frame_state0, nullptr, effect, control,
+              shared_info, function_template_info);
           value = value_effect_control.value();
           effect = value_effect_control.effect();
           control = value_effect_control.control();
         }
         break;
       }
+      case AccessMode::kStoreInLiteral:
       case AccessMode::kStore: {
         // We need a FrameState for the setter stub to restore the correct
         // context and return the appropriate value to fullcodegen.
@@ -1018,16 +1395,16 @@
             common()->FrameState(BailoutId::None(),
                                  OutputFrameStateCombine::Ignore(),
                                  frame_info0),
-            graph()->NewNode(common()->StateValues(2), receiver, value),
+            graph()->NewNode(common()->StateValues(2, SparseInputMask::Dense()),
+                             receiver, value),
             jsgraph()->EmptyStateValues(), jsgraph()->EmptyStateValues(),
             context, target, frame_state);
 
         // Introduce the call to the setter function.
         if (access_info.constant()->IsJSFunction()) {
           effect = graph()->NewNode(
-              javascript()->CallFunction(
-                  3, 0.0f, VectorSlotPair(),
-                  ConvertReceiverMode::kNotNullOrUndefined),
+              javascript()->Call(3, 0.0f, VectorSlotPair(),
+                                 ConvertReceiverMode::kNotNullOrUndefined),
               target, receiver, value, context, frame_state0, effect, control);
           control = graph()->NewNode(common()->IfSuccess(), effect);
         } else {
@@ -1035,11 +1412,9 @@
           Handle<FunctionTemplateInfo> function_template_info(
               Handle<FunctionTemplateInfo>::cast(access_info.constant()));
           DCHECK(!function_template_info->call_code()->IsUndefined(isolate()));
-          ZoneVector<Node*> stack_parameters(graph()->zone());
-          stack_parameters.push_back(value);
           ValueEffectControl value_effect_control = InlineApiCall(
-              receiver, context, target, frame_state0, &stack_parameters,
-              effect, control, shared_info, function_template_info);
+              receiver, context, target, frame_state0, value, effect, control,
+              shared_info, function_template_info);
           value = value_effect_control.value();
           effect = value_effect_control.effect();
           control = value_effect_control.control();
@@ -1047,7 +1422,7 @@
         break;
       }
     }
-  } else if (access_info.IsDataField()) {
+  } else if (access_info.IsDataField() || access_info.IsDataConstantField()) {
     FieldIndex const field_index = access_info.field_index();
     Type* const field_type = access_info.field_type();
     MachineRepresentation const field_representation =
@@ -1059,14 +1434,36 @@
       // Optimize immutable property loads.
       HeapObjectMatcher m(receiver);
       if (m.HasValue() && m.Value()->IsJSObject()) {
+        // TODO(ishell): Use something simpler like
+        //
+        // Handle<Object> value =
+        //     JSObject::FastPropertyAt(Handle<JSObject>::cast(m.Value()),
+        //                              Representation::Tagged(), field_index);
+        //
+        // here, once we have the immutable bit in the access_info.
+
         // TODO(turbofan): Given that we already have the field_index here, we
         // might be smarter in the future and not rely on the LookupIterator,
         // but for now let's just do what Crankshaft does.
         LookupIterator it(m.Value(), name,
                           LookupIterator::OWN_SKIP_INTERCEPTOR);
-        if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
-          Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
-          return ValueEffectControl(value, effect, control);
+        if (it.state() == LookupIterator::DATA) {
+          bool is_readonly_non_configurable =
+              it.IsReadOnly() && !it.IsConfigurable();
+          if (is_readonly_non_configurable ||
+              (FLAG_track_constant_fields &&
+               access_info.IsDataConstantField())) {
+            Node* value = jsgraph()->Constant(JSReceiver::GetDataProperty(&it));
+            if (!is_readonly_non_configurable) {
+              // It's necessary to add a dependency on the map that introduced
+              // the field.
+              DCHECK(access_info.IsDataConstantField());
+              DCHECK(!it.is_dictionary_holder());
+              Handle<Map> field_owner_map = it.GetFieldOwnerMap();
+              dependencies()->AssumeFieldOwner(field_owner_map);
+            }
+            return ValueEffectControl(value, effect, control);
+          }
         }
       }
     }
@@ -1080,6 +1477,7 @@
         kTaggedBase,
         field_index.offset(),
         name,
+        MaybeHandle<Map>(),
         field_type,
         MachineType::TypeForRepresentation(field_representation),
         kFullWriteBarrier};
@@ -1090,6 +1488,7 @@
           FieldAccess const storage_access = {kTaggedBase,
                                               field_index.offset(),
                                               name,
+                                              MaybeHandle<Map>(),
                                               Type::OtherInternal(),
                                               MachineType::TaggedPointer(),
                                               kPointerWriteBarrier};
@@ -1099,13 +1498,27 @@
           field_access.offset = HeapNumber::kValueOffset;
           field_access.name = MaybeHandle<Name>();
         }
+      } else if (field_representation ==
+                 MachineRepresentation::kTaggedPointer) {
+        // Remember the map of the field value, if that map is stable. This is
+        // used by LoadElimination to eliminate map checks on the result.
+        Handle<Map> field_map;
+        if (access_info.field_map().ToHandle(&field_map)) {
+          if (field_map->is_stable()) {
+            dependencies()->AssumeMapStable(field_map);
+            field_access.map = field_map;
+          }
+        }
       }
-      // TODO(turbofan): Track the field_map (if any) on the {field_access} and
-      // use it in LoadElimination to eliminate map checks.
       value = effect = graph()->NewNode(simplified()->LoadField(field_access),
                                         storage, effect, control);
     } else {
-      DCHECK_EQ(AccessMode::kStore, access_mode);
+      bool store_to_constant_field = FLAG_track_constant_fields &&
+                                     (access_mode == AccessMode::kStore) &&
+                                     access_info.IsDataConstantField();
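+      // A store to a constant field performs no actual write; the cases
+      // below merely check that the incoming value matches the field's
+      // current value.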
+
+      DCHECK(access_mode == AccessMode::kStore ||
+             access_mode == AccessMode::kStoreInLiteral);
       switch (field_representation) {
         case MachineRepresentation::kFloat64: {
           value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1138,6 +1551,7 @@
               FieldAccess const storage_access = {kTaggedBase,
                                                   field_index.offset(),
                                                   name,
+                                                  MaybeHandle<Map>(),
                                                   Type::OtherInternal(),
                                                   MachineType::TaggedPointer(),
                                                   kPointerWriteBarrier};
@@ -1149,29 +1563,62 @@
               field_access.machine_type = MachineType::Float64();
             }
           }
-          break;
-        }
-        case MachineRepresentation::kTaggedSigned: {
-          value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
-                                            effect, control);
-          field_access.write_barrier_kind = kNoWriteBarrier;
-          break;
-        }
-        case MachineRepresentation::kTaggedPointer: {
-          // Ensure that {value} is a HeapObject.
-          value = effect = graph()->NewNode(simplified()->CheckHeapObject(),
-                                            value, effect, control);
-          Handle<Map> field_map;
-          if (access_info.field_map().ToHandle(&field_map)) {
-            // Emit a map check for the value.
-            effect = graph()->NewNode(simplified()->CheckMaps(1), value,
-                                      jsgraph()->HeapConstant(field_map),
-                                      effect, control);
+          if (store_to_constant_field) {
+            DCHECK(!access_info.HasTransitionMap());
+            // If the field is constant, check that the value we are going
+            // to store matches the current value.
+            Node* current_value = effect =
+                graph()->NewNode(simplified()->LoadField(field_access), storage,
+                                 effect, control);
+
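+            // Note that NumberEqual never holds for NaN inputs, so storing
+            // NaN over a constant NaN field will deoptimize.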
+            Node* check = graph()->NewNode(simplified()->NumberEqual(),
+                                           current_value, value);
+            effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+                                      control);
+            return ValueEffectControl(value, effect, control);
           }
-          field_access.write_barrier_kind = kPointerWriteBarrier;
           break;
         }
+        case MachineRepresentation::kTaggedSigned:
+        case MachineRepresentation::kTaggedPointer:
         case MachineRepresentation::kTagged:
+          if (store_to_constant_field) {
+            DCHECK(!access_info.HasTransitionMap());
+            // If the field is constant, check that the value we are going
+            // to store matches the current value.
+            Node* current_value = effect =
+                graph()->NewNode(simplified()->LoadField(field_access), storage,
+                                 effect, control);
+
+            Node* check = graph()->NewNode(simplified()->ReferenceEqual(),
+                                           current_value, value);
+            effect = graph()->NewNode(simplified()->CheckIf(), check, effect,
+                                      control);
+            return ValueEffectControl(value, effect, control);
+          }
+
+          if (field_representation == MachineRepresentation::kTaggedSigned) {
+            value = effect = graph()->NewNode(simplified()->CheckSmi(), value,
+                                              effect, control);
+            field_access.write_barrier_kind = kNoWriteBarrier;
+
+          } else if (field_representation ==
+                     MachineRepresentation::kTaggedPointer) {
+            // Ensure that {value} is a HeapObject.
+            value = BuildCheckHeapObject(value, &effect, control);
+            Handle<Map> field_map;
+            if (access_info.field_map().ToHandle(&field_map)) {
+              // Emit a map check for the value.
+              effect = graph()->NewNode(
+                  simplified()->CheckMaps(CheckMapsFlag::kNone,
+                                          ZoneHandleSet<Map>(field_map)),
+                  value, effect, control);
+            }
+            field_access.write_barrier_kind = kPointerWriteBarrier;
+
+          } else {
+            DCHECK_EQ(MachineRepresentation::kTagged, field_representation);
+          }
           break;
         case MachineRepresentation::kNone:
         case MachineRepresentation::kBit:
@@ -1181,6 +1628,9 @@
         case MachineRepresentation::kWord64:
         case MachineRepresentation::kFloat32:
         case MachineRepresentation::kSimd128:
+        case MachineRepresentation::kSimd1x4:
+        case MachineRepresentation::kSimd1x8:
+        case MachineRepresentation::kSimd1x16:
           UNREACHABLE();
           break;
       }
@@ -1202,7 +1652,8 @@
   } else {
     DCHECK(access_info.IsGeneric());
     DCHECK_EQ(AccessMode::kStore, access_mode);
-    DCHECK_EQ(FeedbackVectorSlotKind::STORE_IC, vector->GetKind(slot));
+    DCHECK(vector->IsStoreIC(slot));
+    DCHECK_EQ(vector->GetLanguageMode(slot), language_mode);
     Callable callable =
         CodeFactory::StoreICInOptimizedCode(isolate(), language_mode);
     const CallInterfaceDescriptor& descriptor = callable.descriptor();
@@ -1226,6 +1677,85 @@
   return ValueEffectControl(value, effect, control);
 }
 
+Reduction JSNativeContextSpecialization::ReduceJSStoreDataPropertyInLiteral(
+    Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreDataPropertyInLiteral, node->opcode());
+
+  // If deoptimization is disabled, we cannot optimize.
+  if (!(flags() & kDeoptimizationEnabled)) return NoChange();
+
+  DataPropertyParameters const& p = DataPropertyParametersOf(node->op());
+
+  if (!p.feedback().IsValid()) return NoChange();
+
+  StoreDataPropertyInLiteralICNexus nexus(p.feedback().vector(),
+                                          p.feedback().slot());
+  if (nexus.IsUninitialized()) {
+    return NoChange();
+  }
+
+  if (nexus.ic_state() == MEGAMORPHIC) {
+    return NoChange();
+  }
+
+  DCHECK_EQ(MONOMORPHIC, nexus.ic_state());
+
+  Map* map = nexus.FindFirstMap();
+  if (map == nullptr) {
+    // Maps are weakly held in the feedback vector; we may not have one.
+    return NoChange();
+  }
+
+  Handle<Map> receiver_map(map, isolate());
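+  // The IC also caches the property name in the feedback extra slot.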
+  Handle<Name> cached_name =
+      handle(Name::cast(nexus.GetFeedbackExtra()), isolate());
+
+  PropertyAccessInfo access_info;
+  AccessInfoFactory access_info_factory(dependencies(), native_context(),
+                                        graph()->zone());
+  if (!access_info_factory.ComputePropertyAccessInfo(
+          receiver_map, cached_name, AccessMode::kStoreInLiteral,
+          &access_info)) {
+    return NoChange();
+  }
+
+  if (access_info.IsGeneric()) {
+    return NoChange();
+  }
+
+  Node* receiver = NodeProperties::GetValueInput(node, 0);
+  Node* effect = NodeProperties::GetEffectInput(node);
+  Node* control = NodeProperties::GetControlInput(node);
+
+  // Monomorphic property access.
+  receiver = BuildCheckHeapObject(receiver, &effect, control);
+
+  effect =
+      BuildCheckMaps(receiver, effect, control, access_info.receiver_maps());
+
+  // Ensure that {name} matches the cached name.
+  Node* name = NodeProperties::GetValueInput(node, 1);
+  Node* check = graph()->NewNode(simplified()->ReferenceEqual(), name,
+                                 jsgraph()->HeapConstant(cached_name));
+  effect = graph()->NewNode(simplified()->CheckIf(), check, effect, control);
+
+  Node* value = NodeProperties::GetValueInput(node, 2);
+  Node* context = NodeProperties::GetContextInput(node);
+  Node* frame_state_lazy = NodeProperties::GetFrameStateInput(node);
+
+  // Generate the actual property access.
+  ValueEffectControl continuation = BuildPropertyAccess(
+      receiver, value, context, frame_state_lazy, effect, control, cached_name,
+      access_info, AccessMode::kStoreInLiteral, LanguageMode::SLOPPY,
+      p.feedback().vector(), p.feedback().slot());
+  value = continuation.value();
+  effect = continuation.effect();
+  control = continuation.control();
+
+  ReplaceWithValue(node, value, effect, control);
+  return Replace(value);
+}
+
 namespace {
 
 ExternalArrayType GetArrayTypeFromElementsKind(ElementsKind kind) {
@@ -1249,42 +1779,79 @@
     Node* receiver, Node* index, Node* value, Node* effect, Node* control,
     ElementAccessInfo const& access_info, AccessMode access_mode,
     KeyedAccessStoreMode store_mode) {
+  DCHECK_NE(AccessMode::kStoreInLiteral, access_mode);
+
   // TODO(bmeurer): We currently specialize based on elements kind. We should
   // also be able to properly support strings and other JSObjects here.
   ElementsKind elements_kind = access_info.elements_kind();
   MapList const& receiver_maps = access_info.receiver_maps();
 
-  // Load the elements for the {receiver}.
-  Node* elements = effect = graph()->NewNode(
-      simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
-      effect, control);
-
-  // Don't try to store to a copy-on-write backing store.
-  if (access_mode == AccessMode::kStore &&
-      IsFastSmiOrObjectElementsKind(elements_kind) &&
-      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
-    effect =
-        graph()->NewNode(simplified()->CheckMaps(1), elements,
-                         jsgraph()->FixedArrayMapConstant(), effect, control);
-  }
-
   if (IsFixedTypedArrayElementsKind(elements_kind)) {
-    // Load the {receiver}s length.
-    Node* length = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
-        receiver, effect, control);
+    Node* buffer;
+    Node* length;
+    Node* base_pointer;
+    Node* external_pointer;
 
-    // Check if the {receiver}s buffer was neutered.
-    Node* buffer = effect = graph()->NewNode(
-        simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
-        receiver, effect, control);
-    Node* check = effect = graph()->NewNode(
-        simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+    // Check if we can constant-fold information about the {receiver} (i.e.
+    // for asm.js-like code patterns).
+    HeapObjectMatcher m(receiver);
+    if (m.HasValue() && m.Value()->IsJSTypedArray()) {
+      Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(m.Value());
 
-    // Default to zero if the {receiver}s buffer was neutered.
-    length = graph()->NewNode(
-        common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
-        check, jsgraph()->ZeroConstant(), length);
+      // Determine the {receiver}s (known) length.
+      length = jsgraph()->Constant(typed_array->length_value());
+
+      // Check if the {receiver}s buffer was neutered.
+      buffer = jsgraph()->HeapConstant(typed_array->GetBuffer());
+
+      // Load the (known) base and external pointer for the {receiver}. The
+      // {external_pointer} might be invalid if the {buffer} was neutered, so
+      // we need to make sure that any access is properly guarded.
+      base_pointer = jsgraph()->ZeroConstant();
+      external_pointer = jsgraph()->PointerConstant(
+          FixedTypedArrayBase::cast(typed_array->elements())
+              ->external_pointer());
+    } else {
+      // Load the {receiver}s length.
+      length = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSTypedArrayLength()),
+          receiver, effect, control);
+
+      // Load the buffer for the {receiver}.
+      buffer = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSArrayBufferViewBuffer()),
+          receiver, effect, control);
+
+      // Load the elements for the {receiver}.
+      Node* elements = effect = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForJSObjectElements()),
+          receiver, effect, control);
+
+      // Load the base and external pointer for the {receiver}s {elements}.
+      base_pointer = effect = graph()->NewNode(
+          simplified()->LoadField(
+              AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
+          elements, effect, control);
+      external_pointer = effect = graph()->NewNode(
+          simplified()->LoadField(
+              AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
+          elements, effect, control);
+    }
+
+    // See if we can skip the neutering check.
+    if (isolate()->IsArrayBufferNeuteringIntact()) {
+      // Add a code dependency so we are deoptimized in case an ArrayBuffer
+      // gets neutered.
+      dependencies()->AssumePropertyCell(
+          factory()->array_buffer_neutering_protector());
+    } else {
+      // Default to zero if the {receiver}s buffer was neutered.
+      Node* check = effect = graph()->NewNode(
+          simplified()->ArrayBufferWasNeutered(), buffer, effect, control);
+      length = graph()->NewNode(
+          common()->Select(MachineRepresentation::kTagged, BranchHint::kFalse),
+          check, jsgraph()->ZeroConstant(), length);
+    }
 
     if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
       // Check that the {index} is a valid array index, we do the actual
@@ -1295,21 +1862,10 @@
                                         effect, control);
     } else {
       // Check that the {index} is in the valid range for the {receiver}.
-      DCHECK_EQ(STANDARD_STORE, store_mode);
       index = effect = graph()->NewNode(simplified()->CheckBounds(), index,
                                         length, effect, control);
     }
 
-    // Load the base and external pointer for the {receiver}.
-    Node* base_pointer = effect = graph()->NewNode(
-        simplified()->LoadField(
-            AccessBuilder::ForFixedTypedArrayBaseBasePointer()),
-        elements, effect, control);
-    Node* external_pointer = effect = graph()->NewNode(
-        simplified()->LoadField(
-            AccessBuilder::ForFixedTypedArrayBaseExternalPointer()),
-        elements, effect, control);
-
     // Access the actual element.
     ExternalArrayType external_array_type =
         GetArrayTypeFromElementsKind(elements_kind);
@@ -1320,6 +1876,9 @@
             base_pointer, external_pointer, index, effect, control);
         break;
       }
+      case AccessMode::kStoreInLiteral:
+        UNREACHABLE();
+        break;
       case AccessMode::kStore: {
         // Ensure that the {value} is actually a Number.
         value = effect = graph()->NewNode(simplified()->CheckNumber(), value,
@@ -1360,7 +1919,6 @@
               graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
         } else {
           // Perform the actual store
-          DCHECK_EQ(STANDARD_STORE, store_mode);
           effect = graph()->NewNode(
               simplified()->StoreTypedElement(external_array_type), buffer,
               base_pointer, external_pointer, index, value, effect, control);
@@ -1369,6 +1927,22 @@
       }
     }
   } else {
+    // Load the elements for the {receiver}.
+    Node* elements = effect = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), receiver,
+        effect, control);
+
+    // Don't try to store to a copy-on-write backing store.
+    if (access_mode == AccessMode::kStore &&
+        IsFastSmiOrObjectElementsKind(elements_kind) &&
+        store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+      effect = graph()->NewNode(
+          simplified()->CheckMaps(
+              CheckMapsFlag::kNone,
+              ZoneHandleSet<Map>(factory()->fixed_array_map())),
+          elements, effect, control);
+    }
+
     // Check if the {receiver} is a JSArray.
     bool receiver_is_jsarray = HasOnlyJSArrayMaps(receiver_maps);
 
@@ -1500,25 +2074,25 @@
 
 JSNativeContextSpecialization::ValueEffectControl
 JSNativeContextSpecialization::InlineApiCall(
-    Node* receiver, Node* context, Node* target, Node* frame_state,
-    ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
-    Handle<SharedFunctionInfo> shared_info,
+    Node* receiver, Node* context, Node* target, Node* frame_state, Node* value,
+    Node* effect, Node* control, Handle<SharedFunctionInfo> shared_info,
     Handle<FunctionTemplateInfo> function_template_info) {
   Handle<CallHandlerInfo> call_handler_info = handle(
       CallHandlerInfo::cast(function_template_info->call_code()), isolate());
   Handle<Object> call_data_object(call_handler_info->data(), isolate());
 
+  // Only setters have a value.
+  int const argc = value == nullptr ? 0 : 1;
   // The stub always expects the receiver as the first param on the stack.
   CallApiCallbackStub stub(
-      isolate(), static_cast<int>(stack_parameters->size()),
-      call_data_object->IsUndefined(isolate()),
-      true /* TODO(epertoso): similar to CallOptimization */);
+      isolate(), argc, call_data_object->IsUndefined(isolate()),
+      true /* FunctionTemplateInfo doesn't have an associated context. */);
   CallInterfaceDescriptor call_interface_descriptor =
       stub.GetCallInterfaceDescriptor();
   CallDescriptor* call_descriptor = Linkage::GetStubCallDescriptor(
       isolate(), graph()->zone(), call_interface_descriptor,
-      call_interface_descriptor.GetStackParameterCount() +
-          static_cast<int>(stack_parameters->size()) + 1,
+      call_interface_descriptor.GetStackParameterCount() + argc +
+          1 /* implicit receiver */,
       CallDescriptor::kNeedsFrameState, Operator::kNoProperties,
       MachineType::AnyTagged(), 1);
 
@@ -1529,42 +2103,62 @@
           &function, ExternalReference::DIRECT_API_CALL, isolate())));
   Node* code = jsgraph()->HeapConstant(stub.GetCode());
 
-  ZoneVector<Node*> inputs(zone());
-  inputs.push_back(code);
-
-  // CallApiCallbackStub's register arguments.
-  inputs.push_back(target);
-  inputs.push_back(data);
-  inputs.push_back(receiver);
-  inputs.push_back(function_reference);
-
-  // Stack parameters: CallApiCallbackStub expects the first one to be the
-  // receiver.
-  inputs.push_back(receiver);
-  for (Node* node : *stack_parameters) {
-    inputs.push_back(node);
+  // Assemble CallApiCallbackStub's register arguments and stack parameters.
+  Node* inputs[11] = {
+      code, target, data, receiver /* holder */, function_reference, receiver};
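+  // Input layout: [0] code, [1] target, [2] call data, [3] holder,
+  // [4] function reference, [5] receiver as first stack parameter, and
+  // optionally [6] the value passed to a setter.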
+  int index = 6 + argc;
+  inputs[index++] = context;
+  inputs[index++] = frame_state;
+  inputs[index++] = effect;
+  inputs[index++] = control;
+  // This needs to stay here because of the edge case described in
+  // http://crbug.com/675648.
+  if (value != nullptr) {
+    inputs[6] = value;
   }
-  inputs.push_back(context);
-  inputs.push_back(frame_state);
-  inputs.push_back(effect);
-  inputs.push_back(control);
 
   Node* effect0;
   Node* value0 = effect0 =
-      graph()->NewNode(common()->Call(call_descriptor),
-                       static_cast<int>(inputs.size()), inputs.data());
+      graph()->NewNode(common()->Call(call_descriptor), index, inputs);
   Node* control0 = graph()->NewNode(common()->IfSuccess(), value0);
   return ValueEffectControl(value0, effect0, control0);
 }
 
+Node* JSNativeContextSpecialization::BuildCheckHeapObject(Node* receiver,
+                                                          Node** effect,
+                                                          Node* control) {
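+  // Operators that are statically known to produce a HeapObject need no
+  // explicit CheckHeapObject.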
+  switch (receiver->opcode()) {
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kJSCreate:
+    case IrOpcode::kJSCreateArguments:
+    case IrOpcode::kJSCreateArray:
+    case IrOpcode::kJSCreateClosure:
+    case IrOpcode::kJSCreateIterResultObject:
+    case IrOpcode::kJSCreateLiteralArray:
+    case IrOpcode::kJSCreateLiteralObject:
+    case IrOpcode::kJSCreateLiteralRegExp:
+    case IrOpcode::kJSConvertReceiver:
+    case IrOpcode::kJSToName:
+    case IrOpcode::kJSToString:
+    case IrOpcode::kJSToObject:
+    case IrOpcode::kJSTypeOf: {
+      return receiver;
+    }
+    default: {
+      return *effect = graph()->NewNode(simplified()->CheckHeapObject(),
+                                        receiver, *effect, control);
+    }
+  }
+}
+
 Node* JSNativeContextSpecialization::BuildCheckMaps(
     Node* receiver, Node* effect, Node* control,
-    std::vector<Handle<Map>> const& maps) {
+    std::vector<Handle<Map>> const& receiver_maps) {
   HeapObjectMatcher m(receiver);
   if (m.HasValue()) {
     Handle<Map> receiver_map(m.Value()->map(), isolate());
     if (receiver_map->is_stable()) {
-      for (Handle<Map> map : maps) {
+      for (Handle<Map> map : receiver_maps) {
         if (map.is_identical_to(receiver_map)) {
           dependencies()->AssumeMapStable(receiver_map);
           return effect;
@@ -1572,17 +2166,16 @@
       }
     }
   }
-  int const map_input_count = static_cast<int>(maps.size());
-  int const input_count = 1 + map_input_count + 1 + 1;
-  Node** inputs = zone()->NewArray<Node*>(input_count);
-  inputs[0] = receiver;
-  for (int i = 0; i < map_input_count; ++i) {
-    inputs[1 + i] = jsgraph()->HeapConstant(maps[i]);
+  ZoneHandleSet<Map> maps;
+  CheckMapsFlags flags = CheckMapsFlag::kNone;
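+  // If any receiver map is a migration target, let CheckMaps try to
+  // migrate deprecated instances before deoptimizing.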
+  for (Handle<Map> map : receiver_maps) {
+    maps.insert(map, graph()->zone());
+    if (map->is_migration_target()) {
+      flags |= CheckMapsFlag::kTryMigrateInstance;
+    }
   }
-  inputs[input_count - 2] = effect;
-  inputs[input_count - 1] = control;
-  return graph()->NewNode(simplified()->CheckMaps(map_input_count), input_count,
-                          inputs);
+  return graph()->NewNode(simplified()->CheckMaps(flags, maps), receiver,
+                          effect, control);
 }
 
 void JSNativeContextSpecialization::AssumePrototypesStable(
@@ -1640,15 +2233,14 @@
     MapHandleList* receiver_maps) {
   DCHECK_EQ(0, receiver_maps->length());
   // See if we can infer a concrete type for the {receiver}.
-  Handle<Map> receiver_map;
-  if (InferReceiverMap(receiver, effect).ToHandle(&receiver_map)) {
-    // We can assume that the {receiver} still has the infered {receiver_map}.
-    receiver_maps->Add(receiver_map);
+  if (InferReceiverMaps(receiver, effect, receiver_maps)) {
+    // We can assume that the {receiver} still has one of the inferred
+    // {receiver_maps}.
     return true;
   }
   // Try to extract some maps from the {nexus}.
   if (nexus.ExtractMaps(receiver_maps) != 0) {
     // Try to filter impossible candidates based on inferred root map.
+    Handle<Map> receiver_map;
     if (InferReceiverRootMap(receiver).ToHandle(&receiver_map)) {
       for (int i = receiver_maps->length(); --i >= 0;) {
         if (receiver_maps->at(i)->FindRootMap() != *receiver_map) {
@@ -1661,38 +2253,28 @@
   return false;
 }
 
-MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverMap(Node* receiver,
-                                                                 Node* effect) {
-  HeapObjectMatcher m(receiver);
-  if (m.HasValue()) {
-    Handle<Map> receiver_map(m.Value()->map(), isolate());
-    if (receiver_map->is_stable()) return receiver_map;
-  } else if (m.IsJSCreate()) {
-    HeapObjectMatcher mtarget(m.InputAt(0));
-    HeapObjectMatcher mnewtarget(m.InputAt(1));
-    if (mtarget.HasValue() && mnewtarget.HasValue()) {
-      Handle<JSFunction> constructor =
-          Handle<JSFunction>::cast(mtarget.Value());
-      if (constructor->has_initial_map()) {
-        Handle<Map> initial_map(constructor->initial_map(), isolate());
-        if (initial_map->constructor_or_backpointer() == *mnewtarget.Value()) {
-          // Walk up the {effect} chain to see if the {receiver} is the
-          // dominating effect and there's no other observable write in
-          // between.
-          while (true) {
-            if (receiver == effect) return initial_map;
-            if (!effect->op()->HasProperty(Operator::kNoWrite) ||
-                effect->op()->EffectInputCount() != 1) {
-              break;
-            }
-            effect = NodeProperties::GetEffectInput(effect);
-          }
-        }
-      }
+bool JSNativeContextSpecialization::InferReceiverMaps(
+    Node* receiver, Node* effect, MapHandleList* receiver_maps) {
+  ZoneHandleSet<Map> maps;
+  NodeProperties::InferReceiverMapsResult result =
+      NodeProperties::InferReceiverMaps(receiver, effect, &maps);
+  if (result == NodeProperties::kReliableReceiverMaps) {
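+    // The {receiver} is guaranteed to have one of these maps at the
+    // current {effect}.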
+    for (size_t i = 0; i < maps.size(); ++i) {
+      receiver_maps->Add(maps[i]);
     }
+    return true;
+  } else if (result == NodeProperties::kUnreliableReceiverMaps) {
+    // For untrusted receiver maps, we can still use the information
+    // if the maps are stable.
+    for (size_t i = 0; i < maps.size(); ++i) {
+      if (!maps[i]->is_stable()) return false;
+    }
+    for (size_t i = 0; i < maps.size(); ++i) {
+      receiver_maps->Add(maps[i]);
+    }
+    return true;
   }
-  // TODO(turbofan): Go hunting for CheckMaps(receiver) in the effect chain?
-  return MaybeHandle<Map>();
+  return false;
 }
 
 MaybeHandle<Map> JSNativeContextSpecialization::InferReceiverRootMap(
@@ -1718,6 +2300,24 @@
   return MaybeHandle<Map>();
 }
 
+bool JSNativeContextSpecialization::LookupInScriptContextTable(
+    Handle<Name> name, ScriptContextTableLookupResult* result) {
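+  // Script context slots are always keyed by strings, so other kinds of
+  // names can never match.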
+  if (!name->IsString()) return false;
+  Handle<ScriptContextTable> script_context_table(
+      global_object()->native_context()->script_context_table(), isolate());
+  ScriptContextTable::LookupResult lookup_result;
+  if (!ScriptContextTable::Lookup(script_context_table,
+                                  Handle<String>::cast(name), &lookup_result)) {
+    return false;
+  }
+  Handle<Context> script_context = ScriptContextTable::GetContext(
+      script_context_table, lookup_result.context_index);
+  result->context = script_context;
+  result->immutable = lookup_result.mode == CONST;
+  result->index = lookup_result.slot_index;
+  return true;
+}
+
 Graph* JSNativeContextSpecialization::graph() const {
   return jsgraph()->graph();
 }
diff --git a/src/compiler/js-native-context-specialization.h b/src/compiler/js-native-context-specialization.h
index 2d07061..249c52d 100644
--- a/src/compiler/js-native-context-specialization.h
+++ b/src/compiler/js-native-context-specialization.h
@@ -8,7 +8,7 @@
 #include "src/base/flags.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/deoptimize-reason.h"
-#include "src/type-feedback-vector.h"
+#include "src/feedback-vector.h"
 
 namespace v8 {
 namespace internal {
@@ -53,12 +53,19 @@
   Reduction Reduce(Node* node) final;
 
  private:
+  Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSGetSuperConstructor(Node* node);
   Reduction ReduceJSInstanceOf(Node* node);
+  Reduction ReduceJSOrdinaryHasInstance(Node* node);
   Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSLoadGlobal(Node* node);
+  Reduction ReduceJSStoreGlobal(Node* node);
   Reduction ReduceJSLoadNamed(Node* node);
   Reduction ReduceJSStoreNamed(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
   Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSStoreNamedOwn(Node* node);
+  Reduction ReduceJSStoreDataPropertyInLiteral(Node* node);
 
   Reduction ReduceElementAccess(Node* node, Node* index, Node* value,
                                 MapHandleList const& receiver_maps,
@@ -79,8 +86,11 @@
                               MapHandleList const& receiver_maps,
                               Handle<Name> name, AccessMode access_mode,
                               LanguageMode language_mode,
-                              Handle<TypeFeedbackVector> vector,
-                              FeedbackVectorSlot slot, Node* index = nullptr);
+                              Handle<FeedbackVector> vector, FeedbackSlot slot,
+                              Node* index = nullptr);
+  Reduction ReduceGlobalAccess(Node* node, Node* receiver, Node* value,
+                               Handle<Name> name, AccessMode access_mode,
+                               Node* index = nullptr);
 
   Reduction ReduceSoftDeoptimize(Node* node, DeoptimizeReason reason);
 
@@ -105,8 +115,8 @@
       Node* receiver, Node* value, Node* context, Node* frame_state,
       Node* effect, Node* control, Handle<Name> name,
       PropertyAccessInfo const& access_info, AccessMode access_mode,
-      LanguageMode language_mode, Handle<TypeFeedbackVector> vector,
-      FeedbackVectorSlot slot);
+      LanguageMode language_mode, Handle<FeedbackVector> vector,
+      FeedbackSlot slot);
 
   // Construct the appropriate subgraph for element access.
   ValueEffectControl BuildElementAccess(Node* receiver, Node* index,
@@ -116,6 +126,9 @@
                                         AccessMode access_mode,
                                         KeyedAccessStoreMode store_mode);
 
+  // Construct an appropriate heap object check.
+  Node* BuildCheckHeapObject(Node* receiver, Node** effect, Node* control);
+
   // Construct an appropriate map check.
   Node* BuildCheckMaps(Node* receiver, Node* effect, Node* control,
                        std::vector<Handle<Map>> const& maps);
@@ -136,20 +149,27 @@
                            FeedbackNexus const& nexus,
                            MapHandleList* receiver_maps);
 
-  // Try to infer a map for the given {receiver} at the current {effect}.
-  // If a map is returned then you can be sure that the {receiver} definitely
-  // has the returned map at this point in the program (identified by {effect}).
-  MaybeHandle<Map> InferReceiverMap(Node* receiver, Node* effect);
+  // Try to infer maps for the given {receiver} at the current {effect}.
+  // If maps are returned then you can be sure that the {receiver} definitely
+  // has one of the returned maps at this point in the program (identified
+  // by {effect}).
+  bool InferReceiverMaps(Node* receiver, Node* effect,
+                         MapHandleList* receiver_maps);
   // Try to infer a root map for the {receiver} independent of the current
   // program location.
   MaybeHandle<Map> InferReceiverRootMap(Node* receiver);
 
   ValueEffectControl InlineApiCall(
       Node* receiver, Node* context, Node* target, Node* frame_state,
-      ZoneVector<Node*>* stack_parameters, Node* effect, Node* control,
+      Node* value, Node* effect, Node* control,
       Handle<SharedFunctionInfo> shared_info,
       Handle<FunctionTemplateInfo> function_template_info);
 
+  // Script context lookup logic.
+  struct ScriptContextTableLookupResult;
+  bool LookupInScriptContextTable(Handle<Name> name,
+                                  ScriptContextTableLookupResult* result);
+
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Isolate* isolate() const;
@@ -159,12 +179,16 @@
   SimplifiedOperatorBuilder* simplified() const;
   MachineOperatorBuilder* machine() const;
   Flags flags() const { return flags_; }
+  Handle<JSGlobalObject> global_object() const { return global_object_; }
+  Handle<JSGlobalProxy> global_proxy() const { return global_proxy_; }
   Handle<Context> native_context() const { return native_context_; }
   CompilationDependencies* dependencies() const { return dependencies_; }
   Zone* zone() const { return zone_; }
 
   JSGraph* const jsgraph_;
   Flags const flags_;
+  Handle<JSGlobalObject> global_object_;
+  Handle<JSGlobalProxy> global_proxy_;
   Handle<Context> native_context_;
   CompilationDependencies* const dependencies_;
   Zone* const zone_;
diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc
index f64630c..a8f5692 100644
--- a/src/compiler/js-operator.cc
+++ b/src/compiler/js-operator.cc
@@ -9,8 +9,9 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/feedback-vector.h"
 #include "src/handles-inl.h"
-#include "src/type-feedback-vector.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +21,7 @@
 
 
 int VectorSlotPair::index() const {
-  return vector_.is_null() ? -1 : vector_->GetIndex(slot_);
+  return vector_.is_null() ? -1 : FeedbackVector::GetIndex(slot_);
 }
 
 
@@ -51,48 +52,99 @@
   return OpParameter<ToBooleanHints>(op);
 }
 
-
-bool operator==(CallConstructParameters const& lhs,
-                CallConstructParameters const& rhs) {
+bool operator==(ConstructParameters const& lhs,
+                ConstructParameters const& rhs) {
   return lhs.arity() == rhs.arity() && lhs.frequency() == rhs.frequency() &&
          lhs.feedback() == rhs.feedback();
 }
 
-
-bool operator!=(CallConstructParameters const& lhs,
-                CallConstructParameters const& rhs) {
+bool operator!=(ConstructParameters const& lhs,
+                ConstructParameters const& rhs) {
   return !(lhs == rhs);
 }
 
-
-size_t hash_value(CallConstructParameters const& p) {
+size_t hash_value(ConstructParameters const& p) {
   return base::hash_combine(p.arity(), p.frequency(), p.feedback());
 }
 
-
-std::ostream& operator<<(std::ostream& os, CallConstructParameters const& p) {
+std::ostream& operator<<(std::ostream& os, ConstructParameters const& p) {
   return os << p.arity() << ", " << p.frequency();
 }
 
-
-CallConstructParameters const& CallConstructParametersOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, op->opcode());
-  return OpParameter<CallConstructParameters>(op);
+ConstructParameters const& ConstructParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, op->opcode());
+  return OpParameter<ConstructParameters>(op);
 }
 
+bool operator==(ConstructWithSpreadParameters const& lhs,
+                ConstructWithSpreadParameters const& rhs) {
+  return lhs.arity() == rhs.arity();
+}
 
-std::ostream& operator<<(std::ostream& os, CallFunctionParameters const& p) {
+bool operator!=(ConstructWithSpreadParameters const& lhs,
+                ConstructWithSpreadParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(ConstructWithSpreadParameters const& p) {
+  return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         ConstructWithSpreadParameters const& p) {
+  return os << p.arity();
+}
+
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSConstructWithSpread, op->opcode());
+  return OpParameter<ConstructWithSpreadParameters>(op);
+}
+
+std::ostream& operator<<(std::ostream& os, CallParameters const& p) {
   os << p.arity() << ", " << p.frequency() << ", " << p.convert_mode() << ", "
      << p.tail_call_mode();
   return os;
 }
 
-
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, op->opcode());
-  return OpParameter<CallFunctionParameters>(op);
+const CallParameters& CallParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSCall, op->opcode());
+  return OpParameter<CallParameters>(op);
 }
 
+std::ostream& operator<<(std::ostream& os,
+                         CallForwardVarargsParameters const& p) {
+  return os << p.start_index() << ", " << p.tail_call_mode();
+}
+
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, op->opcode());
+  return OpParameter<CallForwardVarargsParameters>(op);
+}
+
+bool operator==(CallWithSpreadParameters const& lhs,
+                CallWithSpreadParameters const& rhs) {
+  return lhs.arity() == rhs.arity();
+}
+
+bool operator!=(CallWithSpreadParameters const& lhs,
+                CallWithSpreadParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CallWithSpreadParameters const& p) {
+  return base::hash_combine(p.arity());
+}
+
+std::ostream& operator<<(std::ostream& os, CallWithSpreadParameters const& p) {
+  return os << p.arity();
+}
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCallWithSpread, op->opcode());
+  return OpParameter<CallWithSpreadParameters>(op);
+}
 
 bool operator==(CallRuntimeParameters const& lhs,
                 CallRuntimeParameters const& rhs) {
@@ -191,6 +243,84 @@
   return OpParameter<CreateCatchContextParameters>(op);
 }
 
+CreateFunctionContextParameters::CreateFunctionContextParameters(
+    int slot_count, ScopeType scope_type)
+    : slot_count_(slot_count), scope_type_(scope_type) {}
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs) {
+  return lhs.slot_count() == rhs.slot_count() &&
+         lhs.scope_type() == rhs.scope_type();
+}
+
+bool operator!=(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CreateFunctionContextParameters const& parameters) {
+  return base::hash_combine(parameters.slot_count(),
+                            static_cast<int>(parameters.scope_type()));
+}
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateFunctionContextParameters const& parameters) {
+  return os << parameters.slot_count() << ", " << parameters.scope_type();
+}
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+    Operator const* op) {
+  DCHECK_EQ(IrOpcode::kJSCreateFunctionContext, op->opcode());
+  return OpParameter<CreateFunctionContextParameters>(op);
+}
+
+bool operator==(StoreNamedOwnParameters const& lhs,
+                StoreNamedOwnParameters const& rhs) {
+  return lhs.name().location() == rhs.name().location() &&
+         lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(StoreNamedOwnParameters const& lhs,
+                StoreNamedOwnParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(StoreNamedOwnParameters const& p) {
+  return base::hash_combine(p.name().location(), p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, StoreNamedOwnParameters const& p) {
+  return os << Brief(*p.name());
+}
+
+StoreNamedOwnParameters const& StoreNamedOwnParametersOf(const Operator* op) {
+  DCHECK_EQ(IrOpcode::kJSStoreNamedOwn, op->opcode());
+  return OpParameter<StoreNamedOwnParameters>(op);
+}
+
+bool operator==(DataPropertyParameters const& lhs,
+                DataPropertyParameters const& rhs) {
+  return lhs.feedback() == rhs.feedback();
+}
+
+bool operator!=(DataPropertyParameters const& lhs,
+                DataPropertyParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(DataPropertyParameters const& p) {
+  return base::hash_combine(p.feedback());
+}
+
+std::ostream& operator<<(std::ostream& os, DataPropertyParameters const& p) {
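+  // DataPropertyParameters only carries feedback, which has no meaningful
+  // textual representation.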
+  return os;
+}
+
+DataPropertyParameters const& DataPropertyParametersOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSStoreDataPropertyInLiteral);
+  return OpParameter<DataPropertyParameters>(op);
+}
+
 bool operator==(NamedAccess const& lhs, NamedAccess const& rhs) {
   return lhs.name().location() == rhs.name().location() &&
          lhs.language_mode() == rhs.language_mode() &&
@@ -350,6 +480,7 @@
 bool operator==(CreateClosureParameters const& lhs,
                 CreateClosureParameters const& rhs) {
   return lhs.pretenure() == rhs.pretenure() &&
+         lhs.feedback() == rhs.feedback() &&
          lhs.shared_info().location() == rhs.shared_info().location();
 }
 
@@ -361,7 +492,8 @@
 
 
 size_t hash_value(CreateClosureParameters const& p) {
-  return base::hash_combine(p.pretenure(), p.shared_info().location());
+  return base::hash_combine(p.pretenure(), p.shared_info().location(),
+                            p.feedback());
 }
 
 
@@ -410,17 +542,7 @@
 }
 
 BinaryOperationHint BinaryOperationHintOf(const Operator* op) {
-  DCHECK(op->opcode() == IrOpcode::kJSBitwiseOr ||
-         op->opcode() == IrOpcode::kJSBitwiseXor ||
-         op->opcode() == IrOpcode::kJSBitwiseAnd ||
-         op->opcode() == IrOpcode::kJSShiftLeft ||
-         op->opcode() == IrOpcode::kJSShiftRight ||
-         op->opcode() == IrOpcode::kJSShiftRightLogical ||
-         op->opcode() == IrOpcode::kJSAdd ||
-         op->opcode() == IrOpcode::kJSSubtract ||
-         op->opcode() == IrOpcode::kJSMultiply ||
-         op->opcode() == IrOpcode::kJSDivide ||
-         op->opcode() == IrOpcode::kJSModulus);
+  DCHECK_EQ(IrOpcode::kJSAdd, op->opcode());
   return OpParameter<BinaryOperationHint>(op);
 }
 
@@ -436,39 +558,41 @@
   return OpParameter<CompareOperationHint>(op);
 }
 
-#define CACHED_OP_LIST(V)                                   \
-  V(ToInteger, Operator::kNoProperties, 1, 1)               \
-  V(ToLength, Operator::kNoProperties, 1, 1)                \
-  V(ToName, Operator::kNoProperties, 1, 1)                  \
-  V(ToNumber, Operator::kNoProperties, 1, 1)                \
-  V(ToObject, Operator::kFoldable, 1, 1)                    \
-  V(ToString, Operator::kNoProperties, 1, 1)                \
-  V(Create, Operator::kEliminatable, 2, 1)                  \
-  V(CreateIterResultObject, Operator::kEliminatable, 2, 1)  \
-  V(CreateKeyValueArray, Operator::kEliminatable, 2, 1)     \
-  V(HasProperty, Operator::kNoProperties, 2, 1)             \
-  V(TypeOf, Operator::kPure, 1, 1)                          \
-  V(InstanceOf, Operator::kNoProperties, 2, 1)              \
-  V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1)     \
-  V(ForInNext, Operator::kNoProperties, 4, 1)               \
-  V(ForInPrepare, Operator::kNoProperties, 1, 3)            \
-  V(LoadMessage, Operator::kNoThrow, 0, 1)                  \
-  V(StoreMessage, Operator::kNoThrow, 1, 0)                 \
-  V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
-  V(StackCheck, Operator::kNoWrite, 0, 0)
+#define CACHED_OP_LIST(V)                                       \
+  V(BitwiseOr, Operator::kNoProperties, 2, 1)                   \
+  V(BitwiseXor, Operator::kNoProperties, 2, 1)                  \
+  V(BitwiseAnd, Operator::kNoProperties, 2, 1)                  \
+  V(ShiftLeft, Operator::kNoProperties, 2, 1)                   \
+  V(ShiftRight, Operator::kNoProperties, 2, 1)                  \
+  V(ShiftRightLogical, Operator::kNoProperties, 2, 1)           \
+  V(Subtract, Operator::kNoProperties, 2, 1)                    \
+  V(Multiply, Operator::kNoProperties, 2, 1)                    \
+  V(Divide, Operator::kNoProperties, 2, 1)                      \
+  V(Modulus, Operator::kNoProperties, 2, 1)                     \
+  V(ToInteger, Operator::kNoProperties, 1, 1)                   \
+  V(ToLength, Operator::kNoProperties, 1, 1)                    \
+  V(ToName, Operator::kNoProperties, 1, 1)                      \
+  V(ToNumber, Operator::kNoProperties, 1, 1)                    \
+  V(ToObject, Operator::kFoldable, 1, 1)                        \
+  V(ToString, Operator::kNoProperties, 1, 1)                    \
+  V(Create, Operator::kNoProperties, 2, 1)                      \
+  V(CreateIterResultObject, Operator::kEliminatable, 2, 1)      \
+  V(CreateKeyValueArray, Operator::kEliminatable, 2, 1)         \
+  V(HasProperty, Operator::kNoProperties, 2, 1)                 \
+  V(ClassOf, Operator::kPure, 1, 1)                             \
+  V(TypeOf, Operator::kPure, 1, 1)                              \
+  V(InstanceOf, Operator::kNoProperties, 2, 1)                  \
+  V(OrdinaryHasInstance, Operator::kNoProperties, 2, 1)         \
+  V(ForInNext, Operator::kNoProperties, 4, 1)                   \
+  V(ForInPrepare, Operator::kNoProperties, 1, 3)                \
+  V(LoadMessage, Operator::kNoThrow | Operator::kNoWrite, 0, 1) \
+  V(StoreMessage, Operator::kNoRead | Operator::kNoThrow, 1, 0) \
+  V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1)     \
+  V(StackCheck, Operator::kNoWrite, 0, 0)                       \
+  V(Debugger, Operator::kNoProperties, 0, 0)                    \
+  V(GetSuperConstructor, Operator::kNoWrite, 1, 1)
 
-#define BINARY_OP_LIST(V) \
-  V(BitwiseOr)            \
-  V(BitwiseXor)           \
-  V(BitwiseAnd)           \
-  V(ShiftLeft)            \
-  V(ShiftRight)           \
-  V(ShiftRightLogical)    \
-  V(Add)                  \
-  V(Subtract)             \
-  V(Multiply)             \
-  V(Divide)               \
-  V(Modulus)
+#define BINARY_OP_LIST(V) V(Add)
 
 #define COMPARE_OP_LIST(V)                    \
   V(Equal, Operator::kNoProperties)           \
@@ -513,20 +637,24 @@
   BINARY_OP_LIST(BINARY_OP)
 #undef BINARY_OP
 
-#define COMPARE_OP(Name, properties)                                      \
-  template <CompareOperationHint kHint>                                   \
-  struct Name##Operator final : public Operator1<CompareOperationHint> {  \
-    Name##Operator()                                                      \
-        : Operator1<CompareOperationHint>(                                \
-              IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1, \
-              Operator::ZeroIfNoThrow(properties), kHint) {}              \
-  };                                                                      \
-  Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator;      \
-  Name##Operator<CompareOperationHint::kSignedSmall>                      \
-      k##Name##SignedSmallOperator;                                       \
-  Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator;  \
-  Name##Operator<CompareOperationHint::kNumberOrOddball>                  \
-      k##Name##NumberOrOddballOperator;                                   \
+#define COMPARE_OP(Name, properties)                                         \
+  template <CompareOperationHint kHint>                                      \
+  struct Name##Operator final : public Operator1<CompareOperationHint> {     \
+    Name##Operator()                                                         \
+        : Operator1<CompareOperationHint>(                                   \
+              IrOpcode::kJS##Name, properties, "JS" #Name, 2, 1, 1, 1, 1,    \
+              Operator::ZeroIfNoThrow(properties), kHint) {}                 \
+  };                                                                         \
+  Name##Operator<CompareOperationHint::kNone> k##Name##NoneOperator;         \
+  Name##Operator<CompareOperationHint::kSignedSmall>                         \
+      k##Name##SignedSmallOperator;                                          \
+  Name##Operator<CompareOperationHint::kNumber> k##Name##NumberOperator;     \
+  Name##Operator<CompareOperationHint::kNumberOrOddball>                     \
+      k##Name##NumberOrOddballOperator;                                      \
+  Name##Operator<CompareOperationHint::kInternalizedString>                  \
+      k##Name##InternalizedStringOperator;                                   \
+  Name##Operator<CompareOperationHint::kString> k##Name##StringOperator;     \
+  Name##Operator<CompareOperationHint::kReceiver> k##Name##ReceiverOperator; \
   Name##Operator<CompareOperationHint::kAny> k##Name##AnyOperator;
   COMPARE_OP_LIST(COMPARE_OP)
 #undef COMPARE_OP
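A note on the V(...) lists above: CACHED_OP_LIST, BINARY_OP_LIST and COMPARE_OP_LIST are X-macros. Each consumer re-defines V to stamp out one declaration, cache member, or accessor per entry, so extending the operator set is a one-line list edit. A standalone sketch of the pattern, with simplified types and illustrative names rather than V8's:

    #include <iostream>
    #include <string>

    // Each V(Name) entry expands once per use of the list macro, so the
    // list is the single source of truth for which operators exist.
    #define MY_CACHED_OP_LIST(V) \
      V(ToNumber)                \
      V(ToString)

    struct Operator {
      std::string name;
    };

    struct OperatorCache {
    #define CACHED(Name) Operator k##Name##Operator{"JS" #Name};
      MY_CACHED_OP_LIST(CACHED)
    #undef CACHED
    };

    int main() {
      OperatorCache cache;
      std::cout << cache.kToNumberOperator.name << "\n";  // prints JSToNumber
    }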
@@ -578,6 +706,12 @@
         return &cache_.k##Name##NumberOperator;                        \
       case CompareOperationHint::kNumberOrOddball:                     \
         return &cache_.k##Name##NumberOrOddballOperator;               \
+      case CompareOperationHint::kInternalizedString:                  \
+        return &cache_.k##Name##InternalizedStringOperator;            \
+      case CompareOperationHint::kString:                              \
+        return &cache_.k##Name##StringOperator;                        \
+      case CompareOperationHint::kReceiver:                            \
+        return &cache_.k##Name##ReceiverOperator;                      \
       case CompareOperationHint::kAny:                                 \
         return &cache_.k##Name##AnyOperator;                           \
     }                                                                  \
@@ -587,6 +721,17 @@
 COMPARE_OP_LIST(COMPARE_OP)
 #undef COMPARE_OP
 
+const Operator* JSOperatorBuilder::StoreDataPropertyInLiteral(
+    const VectorSlotPair& feedback) {
+  DataPropertyParameters parameters(feedback);
+  return new (zone()) Operator1<DataPropertyParameters>(  // --
+      IrOpcode::kJSStoreDataPropertyInLiteral,
+      Operator::kNoThrow,              // opcode
+      "JSStoreDataPropertyInLiteral",  // name
+      4, 1, 1, 0, 1, 0,                // counts
+      parameters);                     // parameter
+}
+
 const Operator* JSOperatorBuilder::ToBoolean(ToBooleanHints hints) {
   // TODO(turbofan): Cache most important versions of this operator.
   return new (zone()) Operator1<ToBooleanHints>(  //--
@@ -596,18 +741,37 @@
       hints);                                     // parameter
 }
 
-const Operator* JSOperatorBuilder::CallFunction(
-    size_t arity, float frequency, VectorSlotPair const& feedback,
-    ConvertReceiverMode convert_mode, TailCallMode tail_call_mode) {
-  CallFunctionParameters parameters(arity, frequency, feedback, tail_call_mode,
-                                    convert_mode);
-  return new (zone()) Operator1<CallFunctionParameters>(   // --
-      IrOpcode::kJSCallFunction, Operator::kNoProperties,  // opcode
-      "JSCallFunction",                                    // name
-      parameters.arity(), 1, 1, 1, 1, 2,                   // inputs/outputs
-      parameters);                                         // parameter
+const Operator* JSOperatorBuilder::CallForwardVarargs(
+    uint32_t start_index, TailCallMode tail_call_mode) {
+  CallForwardVarargsParameters parameters(start_index, tail_call_mode);
+  return new (zone()) Operator1<CallForwardVarargsParameters>(   // --
+      IrOpcode::kJSCallForwardVarargs, Operator::kNoProperties,  // opcode
+      "JSCallForwardVarargs",                                    // name
+      2, 1, 1, 1, 1, 2,                                          // counts
+      parameters);                                               // parameter
 }
 
+const Operator* JSOperatorBuilder::Call(size_t arity, float frequency,
+                                        VectorSlotPair const& feedback,
+                                        ConvertReceiverMode convert_mode,
+                                        TailCallMode tail_call_mode) {
+  CallParameters parameters(arity, frequency, feedback, tail_call_mode,
+                            convert_mode);
+  return new (zone()) Operator1<CallParameters>(   // --
+      IrOpcode::kJSCall, Operator::kNoProperties,  // opcode
+      "JSCall",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,           // inputs/outputs
+      parameters);                                 // parameter
+}
+
+const Operator* JSOperatorBuilder::CallWithSpread(uint32_t arity) {
+  CallWithSpreadParameters parameters(arity);
+  return new (zone()) Operator1<CallWithSpreadParameters>(   // --
+      IrOpcode::kJSCallWithSpread, Operator::kNoProperties,  // opcode
+      "JSCallWithSpread",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                     // counts
+      parameters);                                           // parameter
+}
 
 const Operator* JSOperatorBuilder::CallRuntime(Runtime::FunctionId id) {
   const Runtime::Function* f = Runtime::FunctionForId(id);
@@ -633,16 +797,24 @@
       parameters);                                        // parameter
 }
 
-const Operator* JSOperatorBuilder::CallConstruct(
-    uint32_t arity, float frequency, VectorSlotPair const& feedback) {
-  CallConstructParameters parameters(arity, frequency, feedback);
-  return new (zone()) Operator1<CallConstructParameters>(   // --
-      IrOpcode::kJSCallConstruct, Operator::kNoProperties,  // opcode
-      "JSCallConstruct",                                    // name
-      parameters.arity(), 1, 1, 1, 1, 2,                    // counts
-      parameters);                                          // parameter
+const Operator* JSOperatorBuilder::Construct(uint32_t arity, float frequency,
+                                             VectorSlotPair const& feedback) {
+  ConstructParameters parameters(arity, frequency, feedback);
+  return new (zone()) Operator1<ConstructParameters>(   // --
+      IrOpcode::kJSConstruct, Operator::kNoProperties,  // opcode
+      "JSConstruct",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                // counts
+      parameters);                                      // parameter
 }
 
+const Operator* JSOperatorBuilder::ConstructWithSpread(uint32_t arity) {
+  ConstructWithSpreadParameters parameters(arity);
+  return new (zone()) Operator1<ConstructWithSpreadParameters>(   // --
+      IrOpcode::kJSConstructWithSpread, Operator::kNoProperties,  // opcode
+      "JSConstructWithSpread",                                    // name
+      parameters.arity(), 1, 1, 1, 1, 2,                          // counts
+      parameters);                                                // parameter
+}
 
 const Operator* JSOperatorBuilder::ConvertReceiver(
     ConvertReceiverMode convert_mode) {
@@ -659,7 +831,7 @@
   return new (zone()) Operator1<NamedAccess>(           // --
       IrOpcode::kJSLoadNamed, Operator::kNoProperties,  // opcode
       "JSLoadNamed",                                    // name
-      2, 1, 1, 1, 1, 2,                                 // counts
+      1, 1, 1, 1, 1, 2,                                 // counts
       access);                                          // parameter
 }
 
@@ -669,7 +841,7 @@
   return new (zone()) Operator1<PropertyAccess>(           // --
       IrOpcode::kJSLoadProperty, Operator::kNoProperties,  // opcode
       "JSLoadProperty",                                    // name
-      3, 1, 1, 1, 1, 2,                                    // counts
+      2, 1, 1, 1, 1, 2,                                    // counts
       access);                                             // parameter
 }
 
@@ -696,7 +868,7 @@
   return new (zone()) Operator1<NamedAccess>(            // --
       IrOpcode::kJSStoreNamed, Operator::kNoProperties,  // opcode
       "JSStoreNamed",                                    // name
-      3, 1, 1, 0, 1, 2,                                  // counts
+      2, 1, 1, 0, 1, 2,                                  // counts
       access);                                           // parameter
 }
 
@@ -707,10 +879,19 @@
   return new (zone()) Operator1<PropertyAccess>(            // --
       IrOpcode::kJSStoreProperty, Operator::kNoProperties,  // opcode
       "JSStoreProperty",                                    // name
-      4, 1, 1, 0, 1, 2,                                     // counts
+      3, 1, 1, 0, 1, 2,                                     // counts
       access);                                              // parameter
 }
 
+const Operator* JSOperatorBuilder::StoreNamedOwn(
+    Handle<Name> name, VectorSlotPair const& feedback) {
+  StoreNamedOwnParameters parameters(name, feedback);
+  return new (zone()) Operator1<StoreNamedOwnParameters>(   // --
+      IrOpcode::kJSStoreNamedOwn, Operator::kNoProperties,  // opcode
+      "JSStoreNamedOwn",                                    // name
+      2, 1, 1, 0, 1, 2,                                     // counts
+      parameters);                                          // parameter
+}
 
 const Operator* JSOperatorBuilder::DeleteProperty(LanguageMode language_mode) {
   return new (zone()) Operator1<LanguageMode>(               // --
@@ -728,7 +909,7 @@
   return new (zone()) Operator1<LoadGlobalParameters>(   // --
       IrOpcode::kJSLoadGlobal, Operator::kNoProperties,  // opcode
       "JSLoadGlobal",                                    // name
-      1, 1, 1, 1, 1, 2,                                  // counts
+      0, 1, 1, 1, 1, 2,                                  // counts
       parameters);                                       // parameter
 }
 
@@ -740,7 +921,7 @@
   return new (zone()) Operator1<StoreGlobalParameters>(   // --
       IrOpcode::kJSStoreGlobal, Operator::kNoProperties,  // opcode
       "JSStoreGlobal",                                    // name
-      2, 1, 1, 0, 1, 2,                                   // counts
+      1, 1, 1, 0, 1, 2,                                   // counts
       parameters);                                        // parameter
 }
 
@@ -752,7 +933,7 @@
       IrOpcode::kJSLoadContext,                  // opcode
       Operator::kNoWrite | Operator::kNoThrow,   // flags
       "JSLoadContext",                           // name
-      1, 1, 0, 1, 1, 0,                          // counts
+      0, 1, 0, 1, 1, 0,                          // counts
       access);                                   // parameter
 }
 
@@ -763,7 +944,7 @@
       IrOpcode::kJSStoreContext,                 // opcode
       Operator::kNoRead | Operator::kNoThrow,    // flags
       "JSStoreContext",                          // name
-      2, 1, 1, 0, 1, 0,                          // counts
+      1, 1, 1, 0, 1, 0,                          // counts
       access);                                   // parameter
 }
 
@@ -806,10 +987,10 @@
       parameters);                                        // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateClosure(
-    Handle<SharedFunctionInfo> shared_info, PretenureFlag pretenure) {
-  CreateClosureParameters parameters(shared_info, pretenure);
+    Handle<SharedFunctionInfo> shared_info, VectorSlotPair const& feedback,
+    PretenureFlag pretenure) {
+  CreateClosureParameters parameters(shared_info, feedback, pretenure);
   return new (zone()) Operator1<CreateClosureParameters>(  // --
       IrOpcode::kJSCreateClosure, Operator::kNoThrow,      // opcode
       "JSCreateClosure",                                   // name
@@ -818,8 +999,8 @@
 }
 
 const Operator* JSOperatorBuilder::CreateLiteralArray(
-    Handle<FixedArray> constant_elements, int literal_flags, int literal_index,
-    int number_of_elements) {
+    Handle<ConstantElementsPair> constant_elements, int literal_flags,
+    int literal_index, int number_of_elements) {
   CreateLiteralParameters parameters(constant_elements, number_of_elements,
                                      literal_flags, literal_index);
   return new (zone()) Operator1<CreateLiteralParameters>(        // --
@@ -830,7 +1011,7 @@
 }
 
 const Operator* JSOperatorBuilder::CreateLiteralObject(
-    Handle<FixedArray> constant_properties, int literal_flags,
+    Handle<BoilerplateDescription> constant_properties, int literal_flags,
     int literal_index, int number_of_properties) {
   CreateLiteralParameters parameters(constant_properties, number_of_properties,
                                      literal_flags, literal_index);
@@ -853,13 +1034,14 @@
       parameters);                                                // parameter
 }
 
-
-const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count) {
-  return new (zone()) Operator1<int>(                               // --
+const Operator* JSOperatorBuilder::CreateFunctionContext(int slot_count,
+                                                         ScopeType scope_type) {
+  CreateFunctionContextParameters parameters(slot_count, scope_type);
+  return new (zone()) Operator1<CreateFunctionContextParameters>(   // --
       IrOpcode::kJSCreateFunctionContext, Operator::kNoProperties,  // opcode
       "JSCreateFunctionContext",                                    // name
       1, 1, 1, 1, 1, 2,                                             // counts
-      slot_count);                                                  // parameter
+      parameters);                                                  // parameter
 }
 
 const Operator* JSOperatorBuilder::CreateCatchContext(
@@ -882,22 +1064,21 @@
 }
 
 const Operator* JSOperatorBuilder::CreateBlockContext(
-    const Handle<ScopeInfo>& scpope_info) {
+    const Handle<ScopeInfo>& scope_info) {
   return new (zone()) Operator1<Handle<ScopeInfo>>(              // --
       IrOpcode::kJSCreateBlockContext, Operator::kNoProperties,  // opcode
       "JSCreateBlockContext",                                    // name
       1, 1, 1, 1, 1, 2,                                          // counts
-      scpope_info);                                              // parameter
+      scope_info);                                               // parameter
 }
 
-
 const Operator* JSOperatorBuilder::CreateScriptContext(
-    const Handle<ScopeInfo>& scpope_info) {
+    const Handle<ScopeInfo>& scope_info) {
   return new (zone()) Operator1<Handle<ScopeInfo>>(               // --
       IrOpcode::kJSCreateScriptContext, Operator::kNoProperties,  // opcode
       "JSCreateScriptContext",                                    // name
       1, 1, 1, 1, 1, 2,                                           // counts
-      scpope_info);                                               // parameter
+      scope_info);                                                // parameter
 }
 
 }  // namespace compiler
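All of the builders in js-operator.cc allocate with new (zone()) Operator1<...>(...), i.e. placement new into a zone: every operator shares the zone's lifetime and is reclaimed in bulk rather than deleted individually. A toy version of that allocation idiom, assuming a far simpler Zone than V8's real one:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    // Toy zone: hands out raw storage and frees everything at destruction.
    // Destructors of zone objects never run, which is fine for POD-like
    // payloads such as the Operator1 stand-in below.
    class Zone {
     public:
      ~Zone() {
        for (char* p : storage_) delete[] p;
      }
      void* Allocate(std::size_t size) {
        storage_.push_back(new char[size]);
        return storage_.back();
      }

     private:
      std::vector<char*> storage_;
    };

    void* operator new(std::size_t size, Zone* zone) {
      return zone->Allocate(size);
    }

    struct Operator1 {
      const char* name;
      int value_inputs;
    };

    int main() {
      Zone zone;
      Operator1* op = new (&zone) Operator1{"JSCall", 3};
      std::cout << op->name << " takes " << op->value_inputs << " inputs\n";
    }  // zone destructor reclaims every operator at once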
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
index 9cdd305..730b4b9 100644
--- a/src/compiler/js-operator.h
+++ b/src/compiler/js-operator.h
@@ -7,36 +7,43 @@
 
 #include "src/base/compiler-specific.h"
 #include "src/globals.h"
+#include "src/handles.h"
 #include "src/runtime/runtime.h"
 #include "src/type-hints.h"
 
 namespace v8 {
 namespace internal {
+
+class AllocationSite;
+class BoilerplateDescription;
+class ConstantElementsPair;
+class SharedFunctionInfo;
+class FeedbackVector;
+
 namespace compiler {
 
 // Forward declarations.
 class Operator;
 struct JSOperatorGlobalCache;
 
-
-// Defines a pair of {TypeFeedbackVector} and {TypeFeedbackVectorSlot}, which
+// Defines a pair of {FeedbackVector} and {FeedbackSlot}, which
 // is used to access the type feedback for a certain {Node}.
 class V8_EXPORT_PRIVATE VectorSlotPair {
  public:
   VectorSlotPair();
-  VectorSlotPair(Handle<TypeFeedbackVector> vector, FeedbackVectorSlot slot)
+  VectorSlotPair(Handle<FeedbackVector> vector, FeedbackSlot slot)
       : vector_(vector), slot_(slot) {}
 
   bool IsValid() const { return !vector_.is_null() && !slot_.IsInvalid(); }
 
-  Handle<TypeFeedbackVector> vector() const { return vector_; }
-  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<FeedbackVector> vector() const { return vector_; }
+  FeedbackSlot slot() const { return slot_; }
 
   int index() const;
 
  private:
-  const Handle<TypeFeedbackVector> vector_;
-  const FeedbackVectorSlot slot_;
+  const Handle<FeedbackVector> vector_;
+  const FeedbackSlot slot_;
 };
 
 bool operator==(VectorSlotPair const&, VectorSlotPair const&);
@@ -54,11 +61,11 @@
 
 
 // Defines the arity and the feedback for a JavaScript constructor call. This is
-// used as a parameter by JSCallConstruct operators.
-class CallConstructParameters final {
+// used as a parameter by JSConstruct operators.
+class ConstructParameters final {
  public:
-  CallConstructParameters(uint32_t arity, float frequency,
-                          VectorSlotPair const& feedback)
+  ConstructParameters(uint32_t arity, float frequency,
+                      VectorSlotPair const& feedback)
       : arity_(arity), frequency_(frequency), feedback_(feedback) {}
 
   uint32_t arity() const { return arity_; }
@@ -71,24 +78,83 @@
   VectorSlotPair const feedback_;
 };
 
-bool operator==(CallConstructParameters const&, CallConstructParameters const&);
-bool operator!=(CallConstructParameters const&, CallConstructParameters const&);
+bool operator==(ConstructParameters const&, ConstructParameters const&);
+bool operator!=(ConstructParameters const&, ConstructParameters const&);
 
-size_t hash_value(CallConstructParameters const&);
+size_t hash_value(ConstructParameters const&);
 
-std::ostream& operator<<(std::ostream&, CallConstructParameters const&);
+std::ostream& operator<<(std::ostream&, ConstructParameters const&);
 
-CallConstructParameters const& CallConstructParametersOf(Operator const*);
+ConstructParameters const& ConstructParametersOf(Operator const*);
 
+// Defines the arity for a JavaScript constructor call with a spread as the
+// last parameter. This is used as a parameter by JSConstructWithSpread
+// operators.
+class ConstructWithSpreadParameters final {
+ public:
+  explicit ConstructWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+  uint32_t arity() const { return arity_; }
+
+ private:
+  uint32_t const arity_;
+};
+
+bool operator==(ConstructWithSpreadParameters const&,
+                ConstructWithSpreadParameters const&);
+bool operator!=(ConstructWithSpreadParameters const&,
+                ConstructWithSpreadParameters const&);
+
+size_t hash_value(ConstructWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&, ConstructWithSpreadParameters const&);
+
+ConstructWithSpreadParameters const& ConstructWithSpreadParametersOf(
+    Operator const*);
+
+// Defines the parameters for a JavaScript call that forwards varargs. This
+// is used as a parameter by JSCallForwardVarargs operators.
+class CallForwardVarargsParameters final {
+ public:
+  CallForwardVarargsParameters(uint32_t start_index,
+                               TailCallMode tail_call_mode)
+      : bit_field_(StartIndexField::encode(start_index) |
+                   TailCallModeField::encode(tail_call_mode)) {}
+
+  uint32_t start_index() const { return StartIndexField::decode(bit_field_); }
+  TailCallMode tail_call_mode() const {
+    return TailCallModeField::decode(bit_field_);
+  }
+
+  bool operator==(CallForwardVarargsParameters const& that) const {
+    return this->bit_field_ == that.bit_field_;
+  }
+  bool operator!=(CallForwardVarargsParameters const& that) const {
+    return !(*this == that);
+  }
+
+ private:
+  friend size_t hash_value(CallForwardVarargsParameters const& p) {
+    return p.bit_field_;
+  }
+
+  typedef BitField<uint32_t, 0, 30> StartIndexField;
+  typedef BitField<TailCallMode, 31, 1> TailCallModeField;
+
+  uint32_t const bit_field_;
+};
+
+std::ostream& operator<<(std::ostream&, CallForwardVarargsParameters const&);
+
+CallForwardVarargsParameters const& CallForwardVarargsParametersOf(
+    Operator const*) WARN_UNUSED_RESULT;
 
 // Defines the arity and the call flags for a JavaScript function call. This is
-// used as a parameter by JSCallFunction operators.
-class CallFunctionParameters final {
+// used as a parameter by JSCall operators.
+class CallParameters final {
  public:
-  CallFunctionParameters(size_t arity, float frequency,
-                         VectorSlotPair const& feedback,
-                         TailCallMode tail_call_mode,
-                         ConvertReceiverMode convert_mode)
+  CallParameters(size_t arity, float frequency, VectorSlotPair const& feedback,
+                 TailCallMode tail_call_mode, ConvertReceiverMode convert_mode)
       : bit_field_(ArityField::encode(arity) |
                    ConvertReceiverModeField::encode(convert_mode) |
                    TailCallModeField::encode(tail_call_mode)),
@@ -105,17 +171,15 @@
   }
   VectorSlotPair const& feedback() const { return feedback_; }
 
-  bool operator==(CallFunctionParameters const& that) const {
+  bool operator==(CallParameters const& that) const {
     return this->bit_field_ == that.bit_field_ &&
            this->frequency_ == that.frequency_ &&
            this->feedback_ == that.feedback_;
   }
-  bool operator!=(CallFunctionParameters const& that) const {
-    return !(*this == that);
-  }
+  bool operator!=(CallParameters const& that) const { return !(*this == that); }
 
  private:
-  friend size_t hash_value(CallFunctionParameters const& p) {
+  friend size_t hash_value(CallParameters const& p) {
     return base::hash_combine(p.bit_field_, p.frequency_, p.feedback_);
   }
 
@@ -128,12 +192,35 @@
   VectorSlotPair const feedback_;
 };
 
-size_t hash_value(CallFunctionParameters const&);
+size_t hash_value(CallParameters const&);
 
-std::ostream& operator<<(std::ostream&, CallFunctionParameters const&);
+std::ostream& operator<<(std::ostream&, CallParameters const&);
 
-const CallFunctionParameters& CallFunctionParametersOf(const Operator* op);
+const CallParameters& CallParametersOf(const Operator* op);
 
+// Defines the arity for a JavaScript function call with a spread as the
+// last parameter. This is used as a parameter by JSCallWithSpread
+// operators.
+class CallWithSpreadParameters final {
+ public:
+  explicit CallWithSpreadParameters(uint32_t arity) : arity_(arity) {}
+
+  uint32_t arity() const { return arity_; }
+
+ private:
+  uint32_t const arity_;
+};
+
+bool operator==(CallWithSpreadParameters const&,
+                CallWithSpreadParameters const&);
+bool operator!=(CallWithSpreadParameters const&,
+                CallWithSpreadParameters const&);
+
+size_t hash_value(CallWithSpreadParameters const&);
+
+std::ostream& operator<<(std::ostream&, CallWithSpreadParameters const&);
+
+CallWithSpreadParameters const& CallWithSpreadParametersOf(Operator const*);
 
 // Defines the arity and the ID for a runtime function call. This is used as a
 // parameter by JSCallRuntime operators.
@@ -216,6 +303,79 @@
 CreateCatchContextParameters const& CreateCatchContextParametersOf(
     Operator const*);
 
+// Defines the slot count and ScopeType for a new function or eval context. This
+// is used as a parameter by the JSCreateFunctionContext operator.
+class CreateFunctionContextParameters final {
+ public:
+  CreateFunctionContextParameters(int slot_count, ScopeType scope_type);
+
+  int slot_count() const { return slot_count_; }
+  ScopeType scope_type() const { return scope_type_; }
+
+ private:
+  int const slot_count_;
+  ScopeType const scope_type_;
+};
+
+bool operator==(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs);
+bool operator!=(CreateFunctionContextParameters const& lhs,
+                CreateFunctionContextParameters const& rhs);
+
+size_t hash_value(CreateFunctionContextParameters const& parameters);
+
+std::ostream& operator<<(std::ostream& os,
+                         CreateFunctionContextParameters const& parameters);
+
+CreateFunctionContextParameters const& CreateFunctionContextParametersOf(
+    Operator const*);
+
+// Defines the parameters for the JSStoreNamedOwn operator.
+class StoreNamedOwnParameters final {
+ public:
+  StoreNamedOwnParameters(Handle<Name> name, VectorSlotPair const& feedback)
+      : name_(name), feedback_(feedback) {}
+
+  Handle<Name> name() const { return name_; }
+  VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+  Handle<Name> const name_;
+  VectorSlotPair const feedback_;
+};
+
+bool operator==(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+bool operator!=(StoreNamedOwnParameters const&, StoreNamedOwnParameters const&);
+
+size_t hash_value(StoreNamedOwnParameters const&);
+
+std::ostream& operator<<(std::ostream&, StoreNamedOwnParameters const&);
+
+const StoreNamedOwnParameters& StoreNamedOwnParametersOf(const Operator* op);
+
+// Defines the feedback, i.e., vector and index, for storing a data property in
+// an object literal. This is used as a parameter by the
+// JSStoreDataPropertyInLiteral operator.
+class DataPropertyParameters final {
+ public:
+  explicit DataPropertyParameters(VectorSlotPair const& feedback)
+      : feedback_(feedback) {}
+
+  VectorSlotPair const& feedback() const { return feedback_; }
+
+ private:
+  VectorSlotPair const feedback_;
+};
+
+bool operator==(DataPropertyParameters const&, DataPropertyParameters const&);
+bool operator!=(DataPropertyParameters const&, DataPropertyParameters const&);
+
+size_t hash_value(DataPropertyParameters const&);
+
+std::ostream& operator<<(std::ostream&, DataPropertyParameters const&);
+
+const DataPropertyParameters& DataPropertyParametersOf(const Operator* op);
+
 // Defines the property of an object for a named access. This is
 // used as a parameter by the JSLoadNamed and JSStoreNamed operators.
 class NamedAccess final {
@@ -361,14 +521,17 @@
 class CreateClosureParameters final {
  public:
   CreateClosureParameters(Handle<SharedFunctionInfo> shared_info,
+                          VectorSlotPair const& feedback,
                           PretenureFlag pretenure)
-      : shared_info_(shared_info), pretenure_(pretenure) {}
+      : shared_info_(shared_info), feedback_(feedback), pretenure_(pretenure) {}
 
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
+  VectorSlotPair const& feedback() const { return feedback_; }
   PretenureFlag pretenure() const { return pretenure_; }
 
  private:
   const Handle<SharedFunctionInfo> shared_info_;
+  VectorSlotPair const feedback_;
   const PretenureFlag pretenure_;
 };
 
@@ -432,17 +595,17 @@
   const Operator* LessThanOrEqual(CompareOperationHint hint);
   const Operator* GreaterThanOrEqual(CompareOperationHint hint);
 
-  const Operator* BitwiseOr(BinaryOperationHint hint);
-  const Operator* BitwiseXor(BinaryOperationHint hint);
-  const Operator* BitwiseAnd(BinaryOperationHint hint);
-  const Operator* ShiftLeft(BinaryOperationHint hint);
-  const Operator* ShiftRight(BinaryOperationHint hint);
-  const Operator* ShiftRightLogical(BinaryOperationHint hint);
+  const Operator* BitwiseOr();
+  const Operator* BitwiseXor();
+  const Operator* BitwiseAnd();
+  const Operator* ShiftLeft();
+  const Operator* ShiftRight();
+  const Operator* ShiftRightLogical();
   const Operator* Add(BinaryOperationHint hint);
-  const Operator* Subtract(BinaryOperationHint hint);
-  const Operator* Multiply(BinaryOperationHint hint);
-  const Operator* Divide(BinaryOperationHint hint);
-  const Operator* Modulus(BinaryOperationHint hint);
+  const Operator* Subtract();
+  const Operator* Multiply();
+  const Operator* Divide();
+  const Operator* Modulus();
 
   const Operator* ToBoolean(ToBooleanHints hints);
   const Operator* ToInteger();
@@ -456,28 +619,33 @@
   const Operator* CreateArguments(CreateArgumentsType type);
   const Operator* CreateArray(size_t arity, Handle<AllocationSite> site);
   const Operator* CreateClosure(Handle<SharedFunctionInfo> shared_info,
+                                VectorSlotPair const& feedback,
                                 PretenureFlag pretenure);
   const Operator* CreateIterResultObject();
   const Operator* CreateKeyValueArray();
-  const Operator* CreateLiteralArray(Handle<FixedArray> constant_elements,
+  const Operator* CreateLiteralArray(Handle<ConstantElementsPair> constant,
                                      int literal_flags, int literal_index,
                                      int number_of_elements);
-  const Operator* CreateLiteralObject(Handle<FixedArray> constant_properties,
+  const Operator* CreateLiteralObject(Handle<BoilerplateDescription> constant,
                                       int literal_flags, int literal_index,
                                       int number_of_properties);
   const Operator* CreateLiteralRegExp(Handle<String> constant_pattern,
                                       int literal_flags, int literal_index);
 
-  const Operator* CallFunction(
+  const Operator* CallForwardVarargs(uint32_t start_index,
+                                     TailCallMode tail_call_mode);
+  const Operator* Call(
       size_t arity, float frequency = 0.0f,
       VectorSlotPair const& feedback = VectorSlotPair(),
       ConvertReceiverMode convert_mode = ConvertReceiverMode::kAny,
       TailCallMode tail_call_mode = TailCallMode::kDisallow);
+  const Operator* CallWithSpread(uint32_t arity);
   const Operator* CallRuntime(Runtime::FunctionId id);
   const Operator* CallRuntime(Runtime::FunctionId id, size_t arity);
   const Operator* CallRuntime(const Runtime::Function* function, size_t arity);
-  const Operator* CallConstruct(uint32_t arity, float frequency,
-                                VectorSlotPair const& feedback);
+  const Operator* Construct(uint32_t arity, float frequency,
+                            VectorSlotPair const& feedback);
+  const Operator* ConstructWithSpread(uint32_t arity);
 
   const Operator* ConvertReceiver(ConvertReceiverMode convert_mode);
 
@@ -489,10 +657,16 @@
   const Operator* StoreNamed(LanguageMode language_mode, Handle<Name> name,
                              VectorSlotPair const& feedback);
 
+  const Operator* StoreNamedOwn(Handle<Name> name,
+                                VectorSlotPair const& feedback);
+  const Operator* StoreDataPropertyInLiteral(const VectorSlotPair& feedback);
+
   const Operator* DeleteProperty(LanguageMode language_mode);
 
   const Operator* HasProperty();
 
+  const Operator* GetSuperConstructor();
+
   const Operator* LoadGlobal(const Handle<Name>& name,
                              const VectorSlotPair& feedback,
                              TypeofMode typeof_mode = NOT_INSIDE_TYPEOF);
@@ -506,6 +680,7 @@
   const Operator* LoadModule(int32_t cell_index);
   const Operator* StoreModule(int32_t cell_index);
 
+  const Operator* ClassOf();
   const Operator* TypeOf();
   const Operator* InstanceOf();
   const Operator* OrdinaryHasInstance();
@@ -524,8 +699,9 @@
   const Operator* GeneratorRestoreRegister(int index);
 
   const Operator* StackCheck();
+  const Operator* Debugger();
 
-  const Operator* CreateFunctionContext(int slot_count);
+  const Operator* CreateFunctionContext(int slot_count, ScopeType scope_type);
   const Operator* CreateCatchContext(const Handle<String>& name,
                                      const Handle<ScopeInfo>& scope_info);
   const Operator* CreateWithContext(const Handle<ScopeInfo>& scope_info);
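CallForwardVarargsParameters and CallParameters above pack their fields into a single uint32_t through BitField typedefs, which reduces operator== and hash_value to plain integer operations. A self-contained approximation of the encode/decode scheme; the BitField below is a simplified stand-in, not V8's actual template:

    #include <cstdint>
    #include <iostream>

    // Simplified stand-in for V8's BitField template: encode a value into a
    // bit range of a word, decode it back out.
    template <typename T, int kShift, int kBits>
    struct BitField {
      static constexpr uint32_t kMask = ((uint32_t{1} << kBits) - 1) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> kShift);
      }
    };

    enum class TailCallMode : uint32_t { kDisallow = 0, kAllow = 1 };
    // Field layout mirroring the typedefs in CallForwardVarargsParameters.
    using StartIndexField = BitField<uint32_t, 0, 30>;
    using TailCallModeField = BitField<TailCallMode, 31, 1>;

    int main() {
      uint32_t bits = StartIndexField::encode(42) |
                      TailCallModeField::encode(TailCallMode::kAllow);
      std::cout << StartIndexField::decode(bits) << "\n";  // 42
      std::cout << static_cast<uint32_t>(TailCallModeField::decode(bits))
                << "\n";  // 1
    }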
diff --git a/src/compiler/js-type-hint-lowering.cc b/src/compiler/js-type-hint-lowering.cc
new file mode 100644
index 0000000..e30e016
--- /dev/null
+++ b/src/compiler/js-type-hint-lowering.cc
@@ -0,0 +1,153 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-type-hint-lowering.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/feedback-vector.h"
+#include "src/type-hints.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSSpeculativeBinopBuilder final {
+ public:
+  JSSpeculativeBinopBuilder(JSTypeHintLowering* lowering, const Operator* op,
+                            Node* left, Node* right, Node* effect,
+                            Node* control, FeedbackSlot slot)
+      : lowering_(lowering),
+        op_(op),
+        left_(left),
+        right_(right),
+        effect_(effect),
+        control_(control),
+        slot_(slot) {}
+
+  BinaryOperationHint GetBinaryOperationHint() {
+    DCHECK_EQ(FeedbackSlotKind::kBinaryOp, feedback_vector()->GetKind(slot_));
+    BinaryOpICNexus nexus(feedback_vector(), slot_);
+    return nexus.GetBinaryOperationFeedback();
+  }
+
+  bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
+    switch (GetBinaryOperationHint()) {
+      case BinaryOperationHint::kSignedSmall:
+        *hint = NumberOperationHint::kSignedSmall;
+        return true;
+      case BinaryOperationHint::kSigned32:
+        *hint = NumberOperationHint::kSigned32;
+        return true;
+      case BinaryOperationHint::kNumberOrOddball:
+        *hint = NumberOperationHint::kNumberOrOddball;
+        return true;
+      case BinaryOperationHint::kAny:
+      case BinaryOperationHint::kNone:
+      case BinaryOperationHint::kString:
+        break;
+    }
+    return false;
+  }
+
+  const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+    switch (op_->opcode()) {
+      case IrOpcode::kJSAdd:
+        return simplified()->SpeculativeNumberAdd(hint);
+      case IrOpcode::kJSSubtract:
+        return simplified()->SpeculativeNumberSubtract(hint);
+      case IrOpcode::kJSMultiply:
+        return simplified()->SpeculativeNumberMultiply(hint);
+      case IrOpcode::kJSDivide:
+        return simplified()->SpeculativeNumberDivide(hint);
+      case IrOpcode::kJSModulus:
+        return simplified()->SpeculativeNumberModulus(hint);
+      case IrOpcode::kJSBitwiseAnd:
+        return simplified()->SpeculativeNumberBitwiseAnd(hint);
+      case IrOpcode::kJSBitwiseOr:
+        return simplified()->SpeculativeNumberBitwiseOr(hint);
+      case IrOpcode::kJSBitwiseXor:
+        return simplified()->SpeculativeNumberBitwiseXor(hint);
+      case IrOpcode::kJSShiftLeft:
+        return simplified()->SpeculativeNumberShiftLeft(hint);
+      case IrOpcode::kJSShiftRight:
+        return simplified()->SpeculativeNumberShiftRight(hint);
+      case IrOpcode::kJSShiftRightLogical:
+        return simplified()->SpeculativeNumberShiftRightLogical(hint);
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return nullptr;
+  }
+
+  Node* BuildSpeculativeOperator(const Operator* op) {
+    DCHECK_EQ(2, op->ValueInputCount());
+    DCHECK_EQ(1, op->EffectInputCount());
+    DCHECK_EQ(1, op->ControlInputCount());
+    DCHECK_EQ(false, OperatorProperties::HasFrameStateInput(op));
+    DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+    DCHECK_EQ(1, op->EffectOutputCount());
+    DCHECK_EQ(0, op->ControlOutputCount());
+    return graph()->NewNode(op, left_, right_, effect_, control_);
+  }
+
+  JSGraph* jsgraph() const { return lowering_->jsgraph(); }
+  Graph* graph() const { return jsgraph()->graph(); }
+  JSOperatorBuilder* javascript() { return jsgraph()->javascript(); }
+  SimplifiedOperatorBuilder* simplified() { return jsgraph()->simplified(); }
+  CommonOperatorBuilder* common() { return jsgraph()->common(); }
+  const Handle<FeedbackVector>& feedback_vector() const {
+    return lowering_->feedback_vector();
+  }
+
+ private:
+  JSTypeHintLowering* lowering_;
+  const Operator* op_;
+  Node* left_;
+  Node* right_;
+  Node* effect_;
+  Node* control_;
+  FeedbackSlot slot_;
+};
+
+JSTypeHintLowering::JSTypeHintLowering(JSGraph* jsgraph,
+                                       Handle<FeedbackVector> feedback_vector)
+    : jsgraph_(jsgraph), feedback_vector_(feedback_vector) {}
+
+Reduction JSTypeHintLowering::ReduceBinaryOperation(const Operator* op,
+                                                    Node* left, Node* right,
+                                                    Node* effect, Node* control,
+                                                    FeedbackSlot slot) {
+  switch (op->opcode()) {
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus: {
+      JSSpeculativeBinopBuilder b(this, op, left, right, effect, control, slot);
+      NumberOperationHint hint;
+      if (b.GetBinaryNumberOperationHint(&hint)) {
+        Node* node = b.BuildSpeculativeOperator(b.SpeculativeNumberOp(hint));
+        return Reduction(node);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return Reduction();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
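The heart of JSSpeculativeBinopBuilder is the mapping from recorded BinaryOperationHint feedback to a NumberOperationHint; kNone, kString and kAny deliberately yield no speculative operator, so the generic JS node stays. The same decision restated as a standalone sketch, with trimmed copies of the enums for illustration:

    #include <iostream>
    #include <optional>

    // Trimmed enum copies; only hints that pin the inputs to a number-like
    // range permit a speculative simplified operator.
    enum class BinaryOperationHint { kNone, kSignedSmall, kSigned32,
                                     kNumberOrOddball, kString, kAny };
    enum class NumberOperationHint { kSignedSmall, kSigned32,
                                     kNumberOrOddball };

    std::optional<NumberOperationHint> ToNumberHint(BinaryOperationHint hint) {
      switch (hint) {
        case BinaryOperationHint::kSignedSmall:
          return NumberOperationHint::kSignedSmall;
        case BinaryOperationHint::kSigned32:
          return NumberOperationHint::kSigned32;
        case BinaryOperationHint::kNumberOrOddball:
          return NumberOperationHint::kNumberOrOddball;
        default:
          return std::nullopt;  // kNone/kString/kAny: keep the generic JS op
      }
    }

    int main() {
      std::cout << ToNumberHint(BinaryOperationHint::kSigned32).has_value()
                << "\n";  // 1
      std::cout << ToNumberHint(BinaryOperationHint::kString).has_value()
                << "\n";  // 0
    }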
diff --git a/src/compiler/js-type-hint-lowering.h b/src/compiler/js-type-hint-lowering.h
new file mode 100644
index 0000000..d1dd1a8
--- /dev/null
+++ b/src/compiler/js-type-hint-lowering.h
@@ -0,0 +1,54 @@
+// Copyright 2017 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+#define V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+
+// The type-hint lowering consumes feedback about data operations (i.e. unary
+// and binary operations) to emit nodes using speculative simplified operators
+// instead of the generic JavaScript operators.
+//
+// This lowering is implemented as an early reduction and can be applied before
+// nodes are placed into the initial graph. It provides the ability to shortcut
+// the JavaScript-level operators and directly emit simplified-level operators
+// even during initial graph building. This is why this lowering does not
+// follow the interface of the reducer framework used after graph construction.
+class JSTypeHintLowering {
+ public:
+  JSTypeHintLowering(JSGraph* jsgraph, Handle<FeedbackVector> feedback_vector);
+
+  // Potential reduction of binary (arithmetic, bitwise and shift) operations.
+  Reduction ReduceBinaryOperation(const Operator* op, Node* left, Node* right,
+                                  Node* effect, Node* control,
+                                  FeedbackSlot slot);
+
+ private:
+  friend class JSSpeculativeBinopBuilder;
+
+  JSGraph* jsgraph() const { return jsgraph_; }
+  const Handle<FeedbackVector>& feedback_vector() const {
+    return feedback_vector_;
+  }
+
+  JSGraph* jsgraph_;
+  Handle<FeedbackVector> feedback_vector_;
+
+  DISALLOW_COPY_AND_ASSIGN(JSTypeHintLowering);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_TYPE_HINT_LOWERING_H_
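The header reuses the graph-reducer Reduction type as a lightweight optional: a default-constructed Reduction means "no replacement, emit the generic node", while Changed() plus replacement() carries the speculative node back to the caller. A minimal sketch of that calling convention, with stand-in types rather than the real compiler classes:

    #include <iostream>

    struct Node {
      const char* label;
    };

    // Stand-in for compiler::Reduction: default construction means
    // "no change"; a non-null replacement means "use this node instead".
    class Reduction {
     public:
      explicit Reduction(Node* replacement = nullptr)
          : replacement_(replacement) {}
      bool Changed() const { return replacement_ != nullptr; }
      Node* replacement() const { return replacement_; }

     private:
      Node* replacement_;
    };

    // Stand-in for ReduceBinaryOperation: succeed only when feedback allows
    // a speculative operator.
    Reduction TryLower(bool has_number_feedback) {
      static Node speculative{"SpeculativeNumberAdd"};
      return has_number_feedback ? Reduction(&speculative) : Reduction();
    }

    int main() {
      Reduction r = TryLower(true);
      std::cout << (r.Changed() ? r.replacement()->label : "JSAdd") << "\n";
    }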
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index dbbeca6..31accbd 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -16,6 +16,7 @@
 #include "src/compiler/operator-properties.h"
 #include "src/compiler/type-cache.h"
 #include "src/compiler/types.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -30,30 +31,6 @@
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering), node_(node) {}
 
-  bool GetBinaryNumberOperationHint(NumberOperationHint* hint) {
-    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
-      DCHECK_NE(0, node_->op()->ControlOutputCount());
-      DCHECK_EQ(1, node_->op()->EffectOutputCount());
-      DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node_->op()));
-      switch (BinaryOperationHintOf(node_->op())) {
-        case BinaryOperationHint::kSignedSmall:
-          *hint = NumberOperationHint::kSignedSmall;
-          return true;
-        case BinaryOperationHint::kSigned32:
-          *hint = NumberOperationHint::kSigned32;
-          return true;
-        case BinaryOperationHint::kNumberOrOddball:
-          *hint = NumberOperationHint::kNumberOrOddball;
-          return true;
-        case BinaryOperationHint::kAny:
-        case BinaryOperationHint::kNone:
-        case BinaryOperationHint::kString:
-          break;
-      }
-    }
-    return false;
-  }
-
   bool GetCompareNumberOperationHint(NumberOperationHint* hint) {
     if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
       DCHECK_EQ(1, node_->op()->EffectOutputCount());
@@ -69,17 +46,51 @@
           return true;
         case CompareOperationHint::kAny:
         case CompareOperationHint::kNone:
+        case CompareOperationHint::kString:
+        case CompareOperationHint::kReceiver:
+        case CompareOperationHint::kInternalizedString:
           break;
       }
     }
     return false;
   }
 
+  bool IsInternalizedStringCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kInternalizedString) &&
+             BothInputsMaybe(Type::InternalizedString());
+    }
+    return false;
+  }
+
+  bool IsReceiverCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kReceiver) &&
+             BothInputsMaybe(Type::Receiver());
+    }
+    return false;
+  }
+
+  bool IsStringCompareOperation() {
+    if (lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) {
+      DCHECK_EQ(1, node_->op()->EffectOutputCount());
+      return (CompareOperationHintOf(node_->op()) ==
+              CompareOperationHint::kString) &&
+             BothInputsMaybe(Type::String());
+    }
+    return false;
+  }
+
   // Check if a string addition will definitely result in creating a ConsString,
   // i.e. if the combined length of the resulting string exceeds the ConsString
   // minimum length.
   bool ShouldCreateConsString() {
     DCHECK_EQ(IrOpcode::kJSAdd, node_->opcode());
+    DCHECK(OneInputIs(Type::String()));
     if (BothInputsAre(Type::String()) ||
         ((lowering_->flags() & JSTypedLowering::kDeoptimizationEnabled) &&
          BinaryOperationHintOf(node_->op()) == BinaryOperationHint::kString)) {
@@ -103,6 +114,66 @@
     return false;
   }
 
+  // Inserts a CheckReceiver for the left input.
+  void CheckLeftInputToReceiver() {
+    Node* left_input = graph()->NewNode(simplified()->CheckReceiver(), left(),
+                                        effect(), control());
+    node_->ReplaceInput(0, left_input);
+    update_effect(left_input);
+  }
+
+  // Checks that both inputs are Receiver, and if we don't know
+  // statically that one side is already a Receiver, inserts a
+  // CheckReceiver node.
+  void CheckInputsToReceiver() {
+    if (!left_type()->Is(Type::Receiver())) {
+      CheckLeftInputToReceiver();
+    }
+    if (!right_type()->Is(Type::Receiver())) {
+      Node* right_input = graph()->NewNode(simplified()->CheckReceiver(),
+                                           right(), effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
+
+  // Checks that both inputs are String, and if we don't know
+  // statically that one side is already a String, inserts a
+  // CheckString node.
+  void CheckInputsToString() {
+    if (!left_type()->Is(Type::String())) {
+      Node* left_input = graph()->NewNode(simplified()->CheckString(), left(),
+                                          effect(), control());
+      node_->ReplaceInput(0, left_input);
+      update_effect(left_input);
+    }
+    if (!right_type()->Is(Type::String())) {
+      Node* right_input = graph()->NewNode(simplified()->CheckString(), right(),
+                                           effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
+
+  // Checks that both inputs are InternalizedString, and if we don't know
+  // statically that one side is already an InternalizedString, inserts a
+  // CheckInternalizedString node.
+  void CheckInputsToInternalizedString() {
+    if (!left_type()->Is(Type::UniqueName())) {
+      Node* left_input = graph()->NewNode(
+          simplified()->CheckInternalizedString(), left(), effect(), control());
+      node_->ReplaceInput(0, left_input);
+      update_effect(left_input);
+    }
+    if (!right_type()->Is(Type::UniqueName())) {
+      Node* right_input =
+          graph()->NewNode(simplified()->CheckInternalizedString(), right(),
+                           effect(), control());
+      node_->ReplaceInput(1, right_input);
+      update_effect(right_input);
+    }
+  }
+
   void ConvertInputsToNumber() {
     // To convert the inputs to numbers, we have to provide frame states
     // for lazy bailouts in the ToNumber conversions.
@@ -277,30 +348,18 @@
     return nullptr;
   }
 
-  const Operator* SpeculativeNumberOp(NumberOperationHint hint) {
+  const Operator* NumberOpFromSpeculativeNumberOp() {
     switch (node_->opcode()) {
-      case IrOpcode::kJSAdd:
-        return simplified()->SpeculativeNumberAdd(hint);
-      case IrOpcode::kJSSubtract:
-        return simplified()->SpeculativeNumberSubtract(hint);
-      case IrOpcode::kJSMultiply:
-        return simplified()->SpeculativeNumberMultiply(hint);
-      case IrOpcode::kJSDivide:
-        return simplified()->SpeculativeNumberDivide(hint);
-      case IrOpcode::kJSModulus:
-        return simplified()->SpeculativeNumberModulus(hint);
-      case IrOpcode::kJSBitwiseAnd:
-        return simplified()->SpeculativeNumberBitwiseAnd(hint);
-      case IrOpcode::kJSBitwiseOr:
-        return simplified()->SpeculativeNumberBitwiseOr(hint);
-      case IrOpcode::kJSBitwiseXor:
-        return simplified()->SpeculativeNumberBitwiseXor(hint);
-      case IrOpcode::kJSShiftLeft:
-        return simplified()->SpeculativeNumberShiftLeft(hint);
-      case IrOpcode::kJSShiftRight:
-        return simplified()->SpeculativeNumberShiftRight(hint);
-      case IrOpcode::kJSShiftRightLogical:
-        return simplified()->SpeculativeNumberShiftRightLogical(hint);
+      case IrOpcode::kSpeculativeNumberAdd:
+        return simplified()->NumberAdd();
+      case IrOpcode::kSpeculativeNumberSubtract:
+        return simplified()->NumberSubtract();
+      case IrOpcode::kSpeculativeNumberMultiply:
+        return simplified()->NumberMultiply();
+      case IrOpcode::kSpeculativeNumberDivide:
+        return simplified()->NumberDivide();
+      case IrOpcode::kSpeculativeNumberModulus:
+        return simplified()->NumberModulus();
       default:
         break;
     }
@@ -316,6 +375,10 @@
 
   bool BothInputsAre(Type* t) { return LeftInputIs(t) && RightInputIs(t); }
 
+  bool BothInputsMaybe(Type* t) {
+    return left_type()->Maybe(t) && right_type()->Maybe(t);
+  }
+
   bool OneInputCannotBe(Type* t) {
     return !left_type()->Maybe(t) || !right_type()->Maybe(t);
   }
@@ -459,8 +522,13 @@
       dependencies_(dependencies),
       flags_(flags),
       jsgraph_(jsgraph),
-      the_hole_type_(
-          Type::HeapConstant(factory()->the_hole_value(), graph()->zone())),
+      pointer_comparable_type_(Type::Union(
+          Type::Oddball(),
+          Type::Union(
+              Type::SymbolOrReceiver(),
+              Type::HeapConstant(factory()->empty_string(), graph()->zone()),
+              graph()->zone()),
+          graph()->zone())),
       type_cache_(TypeCache::Get()) {
   for (size_t k = 0; k < arraysize(shifted_int32_ranges_); ++k) {
     double min = kMinInt / (1 << k);
@@ -469,20 +537,22 @@
   }
 }
 
+Reduction JSTypedLowering::ReduceSpeculativeNumberAdd(Node* node) {
+  JSBinopReduction r(this, node);
+  NumberOperationHint hint = NumberOperationHintOf(node->op());
+  if (hint == NumberOperationHint::kNumberOrOddball &&
+      r.BothInputsAre(Type::PlainPrimitive()) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
+    // SpeculativeNumberAdd(x:-string, y:-string) =>
+    //     NumberAdd(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
+  }
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    if (hint == NumberOperationHint::kNumberOrOddball &&
-        r.BothInputsAre(Type::PlainPrimitive()) &&
-        r.NeitherInputCanBe(Type::StringOrReceiver())) {
-      // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
-      r.ConvertInputsToNumber();
-      return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
-    }
-    return r.ChangeToSpeculativeOperator(
-        simplified()->SpeculativeNumberAdd(hint), Type::Number());
-  }
   if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
     r.ConvertInputsToNumber();
@@ -505,13 +575,20 @@
     } else if (!r.RightInputIs(Type::String())) {
       flags = STRING_ADD_CONVERT_RIGHT;
     }
+    Operator::Properties properties = node->op()->properties();
+    if (r.NeitherInputCanBe(Type::Receiver())) {
+      // Neither side can be a receiver, so the string addition
+      // cannot cause any observable side effects; it can
+      // obviously still throw.
+      properties = Operator::kNoWrite | Operator::kNoDeopt;
+    }
     // JSAdd(x:string, y) => CallStub[StringAdd](x, y)
     // JSAdd(x, y:string) => CallStub[StringAdd](x, y)
     Callable const callable =
         CodeFactory::StringAdd(isolate(), flags, NOT_TENURED);
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
+        CallDescriptor::kNeedsFrameState, properties);
     DCHECK_EQ(1, OperatorProperties::GetFrameStateInputCount(node->op()));
     node->InsertInput(graph()->zone(), 0,
                       jsgraph()->HeapConstant(callable.code()));
@@ -523,16 +600,6 @@
 
 Reduction JSTypedLowering::ReduceNumberBinop(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    if (hint == NumberOperationHint::kNumberOrOddball &&
-        r.BothInputsAre(Type::NumberOrOddball())) {
-      r.ConvertInputsToNumber();
-      return r.ChangeToPureOperator(r.NumberOp(), Type::Number());
-    }
-    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
-                                         Type::Number());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -541,13 +608,20 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceSpeculativeNumberBinop(Node* node) {
+  JSBinopReduction r(this, node);
+  NumberOperationHint hint = NumberOperationHintOf(node->op());
+  if (hint == NumberOperationHint::kNumberOrOddball &&
+      r.BothInputsAre(Type::NumberOrOddball())) {
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(r.NumberOpFromSpeculativeNumberOp(),
+                                  Type::Number());
+  }
+  return NoChange();
+}
+
 Reduction JSTypedLowering::ReduceInt32Binop(Node* node) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    return r.ChangeToSpeculativeOperator(r.SpeculativeNumberOp(hint),
-                                         Type::Signed32());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -559,12 +633,6 @@
 
 Reduction JSTypedLowering::ReduceUI32Shift(Node* node, Signedness signedness) {
   JSBinopReduction r(this, node);
-  NumberOperationHint hint;
-  if (r.GetBinaryNumberOperationHint(&hint)) {
-    return r.ChangeToSpeculativeOperator(
-        r.SpeculativeNumberOp(hint),
-        signedness == kUnsigned ? Type::Unsigned32() : Type::Signed32());
-  }
   if (r.BothInputsAre(Type::PlainPrimitive()) ||
       !(flags() & kDeoptimizationEnabled)) {
     r.ConvertInputsToNumber();
@@ -746,6 +814,10 @@
     r.ConvertInputsToNumber();
     less_than = simplified()->NumberLessThan();
     less_than_or_equal = simplified()->NumberLessThanOrEqual();
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    less_than = simplified()->StringLessThan();
+    less_than_or_equal = simplified()->StringLessThanOrEqual();
   } else {
     return NoChange();
   }
@@ -787,61 +859,72 @@
     return Replace(jsgraph()->Constant(f->string_string()));
   } else if (type->Is(Type::Symbol())) {
     return Replace(jsgraph()->Constant(f->symbol_string()));
-  } else if (type->Is(Type::Union(Type::Undefined(), Type::OtherUndetectable(),
-                                  graph()->zone()))) {
+  } else if (type->Is(Type::OtherUndetectableOrUndefined())) {
     return Replace(jsgraph()->Constant(f->undefined_string()));
-  } else if (type->Is(Type::Null())) {
+  } else if (type->Is(Type::NonCallableOrNull())) {
     return Replace(jsgraph()->Constant(f->object_string()));
   } else if (type->Is(Type::Function())) {
     return Replace(jsgraph()->Constant(f->function_string()));
   } else if (type->IsHeapConstant()) {
     return Replace(jsgraph()->Constant(
         Object::TypeOf(isolate(), type->AsHeapConstant()->Value())));
-  } else if (type->IsOtherNumberConstant()) {
-    return Replace(jsgraph()->Constant(f->number_string()));
   }
 
   return NoChange();
 }
 
 Reduction JSTypedLowering::ReduceJSEqualTypeOf(Node* node, bool invert) {
+  Node* input;
+  Handle<String> type;
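+  // Recognize both the (typeof x) == "..." and "..." == (typeof x) patterns.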
   HeapObjectBinopMatcher m(node);
   if (m.left().IsJSTypeOf() && m.right().HasValue() &&
       m.right().Value()->IsString()) {
-    Node* replacement;
-    Node* input = m.left().InputAt(0);
-    Handle<String> value = Handle<String>::cast(m.right().Value());
-    if (String::Equals(value, factory()->boolean_string())) {
-      replacement =
-          graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
-                           graph()->NewNode(simplified()->ReferenceEqual(),
-                                            input, jsgraph()->TrueConstant()),
-                           jsgraph()->TrueConstant(),
-                           graph()->NewNode(simplified()->ReferenceEqual(),
-                                            input, jsgraph()->FalseConstant()));
-    } else if (String::Equals(value, factory()->function_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsCallable(), input);
-    } else if (String::Equals(value, factory()->number_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsNumber(), input);
-    } else if (String::Equals(value, factory()->string_string())) {
-      replacement = graph()->NewNode(simplified()->ObjectIsString(), input);
-    } else if (String::Equals(value, factory()->undefined_string())) {
-      replacement = graph()->NewNode(
-          common()->Select(MachineRepresentation::kTagged),
-          graph()->NewNode(simplified()->ReferenceEqual(), input,
-                           jsgraph()->NullConstant()),
-          jsgraph()->FalseConstant(),
-          graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
-    } else {
-      return NoChange();
-    }
-    if (invert) {
-      replacement = graph()->NewNode(simplified()->BooleanNot(), replacement);
-    }
-    ReplaceWithValue(node, replacement);
-    return Replace(replacement);
+    input = m.left().InputAt(0);
+    type = Handle<String>::cast(m.right().Value());
+  } else if (m.right().IsJSTypeOf() && m.left().HasValue() &&
+             m.left().Value()->IsString()) {
+    input = m.right().InputAt(0);
+    type = Handle<String>::cast(m.left().Value());
+  } else {
+    return NoChange();
   }
-  return NoChange();
+  Node* value;
+  if (String::Equals(type, factory()->boolean_string())) {
+    value =
+        graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
+                         graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                          jsgraph()->TrueConstant()),
+                         jsgraph()->TrueConstant(),
+                         graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                          jsgraph()->FalseConstant()));
+  } else if (String::Equals(type, factory()->function_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsDetectableCallable(), input);
+  } else if (String::Equals(type, factory()->number_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsNumber(), input);
+  } else if (String::Equals(type, factory()->object_string())) {
+    value = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged),
+        graph()->NewNode(simplified()->ObjectIsNonCallable(), input),
+        jsgraph()->TrueConstant(),
+        graph()->NewNode(simplified()->ReferenceEqual(), input,
+                         jsgraph()->NullConstant()));
+  } else if (String::Equals(type, factory()->string_string())) {
+    value = graph()->NewNode(simplified()->ObjectIsString(), input);
+  } else if (String::Equals(type, factory()->undefined_string())) {
+    value = graph()->NewNode(
+        common()->Select(MachineRepresentation::kTagged),
+        graph()->NewNode(simplified()->ReferenceEqual(), input,
+                         jsgraph()->NullConstant()),
+        jsgraph()->FalseConstant(),
+        graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+  } else {
+    return NoChange();
+  }
+  if (invert) {
+    value = graph()->NewNode(simplified()->BooleanNot(), value);
+  }
+  ReplaceWithValue(node, value);
+  return Replace(value);
 }
 
 Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
@@ -850,6 +933,13 @@
 
   JSBinopReduction r(this, node);
 
+  if (r.BothInputsAre(Type::UniqueName())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
+  if (r.IsInternalizedStringCompareOperation()) {
+    r.CheckInputsToInternalizedString();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
@@ -884,6 +974,12 @@
         simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
   } else if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.IsReceiverCompareOperation()) {
+    r.CheckInputsToReceiver();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   return NoChange();
 }
@@ -898,10 +994,10 @@
       return Replace(replacement);
     }
   }
-  if (r.OneInputCannotBe(Type::NumberOrSimdOrString())) {
+  if (r.OneInputCannotBe(Type::NumberOrString())) {
-    // For values with canonical representation (i.e. neither String, nor
-    // Simd128Value nor Number) an empty type intersection means the values
-    // cannot be strictly equal.
+    // For values with canonical representation (i.e. neither String nor
+    // Number) an empty type intersection means the values cannot be strictly
+    // equal.
     if (!r.left_type()->Maybe(r.right_type())) {
       Node* replacement = jsgraph()->BooleanConstant(invert);
       ReplaceWithValue(node, replacement);
@@ -912,27 +1008,16 @@
   Reduction const reduction = ReduceJSEqualTypeOf(node, invert);
   if (reduction.Changed()) return reduction;
 
-  if (r.OneInputIs(the_hole_type_)) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Undefined())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Null())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Boolean())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Object())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
-  if (r.OneInputIs(Type::Receiver())) {
-    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
-  }
   if (r.BothInputsAre(Type::Unique())) {
     return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
   }
+  if (r.OneInputIs(pointer_comparable_type_)) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
+  if (r.IsInternalizedStringCompareOperation()) {
+    r.CheckInputsToInternalizedString();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  }
   if (r.BothInputsAre(Type::String())) {
     return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
@@ -946,6 +1031,15 @@
         simplified()->SpeculativeNumberEqual(hint), invert, Type::Boolean());
   } else if (r.BothInputsAre(Type::Number())) {
     return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  } else if (r.IsReceiverCompareOperation()) {
+    // For strict equality, it's enough to know that one input is a Receiver,
+    // as a strict equality comparison with a Receiver can only yield true if
+    // both sides refer to the same Receiver.
+    r.CheckLeftInputToReceiver();
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(), invert);
+  } else if (r.IsStringCompareOperation()) {
+    r.CheckInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
   }
   return NoChange();
 }
@@ -958,7 +1052,6 @@
     return Replace(input);
   } else if (input_type->Is(Type::OrderedNumber())) {
     // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x,#0))
-    RelaxEffectsAndControls(node);
     node->ReplaceInput(0, graph()->NewNode(simplified()->NumberEqual(), input,
                                            jsgraph()->ZeroConstant()));
     node->TrimInputCount(1);
@@ -966,10 +1059,33 @@
     return Changed(node);
   } else if (input_type->Is(Type::Number())) {
     // JSToBoolean(x:number) => NumberToBoolean(x)
-    RelaxEffectsAndControls(node);
     node->TrimInputCount(1);
     NodeProperties::ChangeOp(node, simplified()->NumberToBoolean());
     return Changed(node);
+  } else if (input_type->Is(Type::DetectableReceiverOrNull())) {
+    // JSToBoolean(x:detectable receiver \/ null)
+    //   => BooleanNot(ReferenceEqual(x,#null))
+    node->ReplaceInput(0, graph()->NewNode(simplified()->ReferenceEqual(),
+                                           input, jsgraph()->NullConstant()));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
+  } else if (input_type->Is(Type::ReceiverOrNullOrUndefined())) {
+    // JSToBoolean(x:receiver \/ null \/ undefined)
+    //   => BooleanNot(ObjectIsUndetectable(x))
+    node->ReplaceInput(
+        0, graph()->NewNode(simplified()->ObjectIsUndetectable(), input));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
+  } else if (input_type->Is(Type::String())) {
+    // JSToBoolean(x:string) => BooleanNot(ReferenceEqual(x,""))
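+    // This relies on the empty string being canonicalized in the heap, so a
+    // reference comparison against the EmptyStringConstant is sufficient.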
+    node->ReplaceInput(0,
+                       graph()->NewNode(simplified()->ReferenceEqual(), input,
+                                        jsgraph()->EmptyStringConstant()));
+    node->TrimInputCount(1);
+    NodeProperties::ChangeOp(node, simplified()->BooleanNot());
+    return Changed(node);
   }
   return NoChange();
 }
@@ -1239,6 +1355,9 @@
   Node* value = NodeProperties::GetValueInput(node, 2);
   Type* key_type = NodeProperties::GetType(key);
   Type* value_type = NodeProperties::GetType(value);
+
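+  // The value conversion below uses the pure PlainPrimitiveToNumber operator,
+  // which is only valid when converting {value} cannot have observable side
+  // effects (and hence needs no frame state).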
+  if (!value_type->Is(Type::PlainPrimitive())) return NoChange();
+
   HeapObjectMatcher mbase(base);
   if (mbase.HasValue() && mbase.Value()->IsJSTypedArray()) {
     Handle<JSTypedArray> const array =
@@ -1257,7 +1376,6 @@
             Handle<FixedTypedArrayBase>::cast(handle(array->elements()));
         Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
         Node* length = jsgraph()->Constant(byte_length);
-        Node* context = NodeProperties::GetContextInput(node);
         Node* effect = NodeProperties::GetEffectInput(node);
         Node* control = NodeProperties::GetControlInput(node);
         // Convert to a number first.
@@ -1266,12 +1384,8 @@
           if (number_reduction.Changed()) {
             value = number_reduction.replacement();
           } else {
-            Node* frame_state_for_to_number =
-                NodeProperties::FindFrameStateBefore(node);
-            value = effect =
-                graph()->NewNode(javascript()->ToNumber(), value, context,
-                                 frame_state_for_to_number, effect, control);
-            control = graph()->NewNode(common()->IfSuccess(), value);
+            value =
+                graph()->NewNode(simplified()->PlainPrimitiveToNumber(), value);
           }
         }
         // Check if we can avoid the bounds check.
@@ -1316,11 +1430,30 @@
   Node* constructor = NodeProperties::GetValueInput(node, 0);
   Type* constructor_type = NodeProperties::GetType(constructor);
   Node* object = NodeProperties::GetValueInput(node, 1);
+  Type* object_type = NodeProperties::GetType(object);
   Node* context = NodeProperties::GetContextInput(node);
   Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
+  // Check if the {constructor} cannot be callable.
+  // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) step 1.
+  if (!constructor_type->Maybe(Type::Callable())) {
+    Node* value = jsgraph()->FalseConstant();
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
+  // If the {constructor} cannot be a JSBoundFunction and the {object} cannot
+  // be a JSReceiver, then this can be constant-folded to false.
+  // See ES6 section 7.3.19 OrdinaryHasInstance ( C, O ) steps 2 and 3.
+  if (!object_type->Maybe(Type::Receiver()) &&
+      !constructor_type->Maybe(Type::BoundFunction())) {
+    Node* value = jsgraph()->FalseConstant();
+    ReplaceWithValue(node, value, effect, control);
+    return Replace(value);
+  }
+
   // Check if the {constructor} is a (known) JSFunction.
   if (!constructor_type->IsHeapConstant() ||
       !constructor_type->AsHeapConstant()->Value()->IsJSFunction()) {
@@ -1473,16 +1606,17 @@
   DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
   Node* control = graph()->start();
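+  // Walk up the context chain to the depth of the {access}, threading the
+  // intermediate context loads through the effect chain.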
   for (size_t i = 0; i < access.depth(); ++i) {
-    Node* previous = effect = graph()->NewNode(
+    context = effect = graph()->NewNode(
         simplified()->LoadField(
             AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
-        NodeProperties::GetValueInput(node, 0), effect, control);
-    node->ReplaceInput(0, previous);
+        context, effect, control);
   }
+  node->ReplaceInput(0, context);
   node->ReplaceInput(1, effect);
-  node->ReplaceInput(2, control);
+  node->AppendInput(jsgraph()->zone(), control);
   NodeProperties::ChangeOp(
       node,
       simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
@@ -1493,15 +1627,17 @@
   DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
   ContextAccess const& access = ContextAccessOf(node->op());
   Node* effect = NodeProperties::GetEffectInput(node);
+  Node* context = NodeProperties::GetContextInput(node);
   Node* control = graph()->start();
+  Node* value = NodeProperties::GetValueInput(node, 0);
   for (size_t i = 0; i < access.depth(); ++i) {
-    Node* previous = effect = graph()->NewNode(
+    context = effect = graph()->NewNode(
         simplified()->LoadField(
             AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
-        NodeProperties::GetValueInput(node, 0), effect, control);
-    node->ReplaceInput(0, previous);
+        context, effect, control);
   }
-  node->RemoveInput(2);
+  node->ReplaceInput(0, context);
+  node->ReplaceInput(1, value);
   node->ReplaceInput(2, effect);
   NodeProperties::ChangeOp(
       node,
@@ -1591,7 +1727,6 @@
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* context = NodeProperties::GetContextInput(node);
   Type* context_type = NodeProperties::GetType(context);
-  Node* frame_state = NodeProperties::GetFrameStateInput(node);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
@@ -1614,10 +1749,10 @@
     } else {
       Node* native_context = effect = graph()->NewNode(
           javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, effect);
+          context, effect);
       receiver = effect = graph()->NewNode(
           javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-          native_context, native_context, effect);
+          native_context, effect);
     }
     ReplaceWithValue(node, receiver, effect, control);
     return Replace(receiver);
@@ -1638,14 +1773,15 @@
     Node* efalse = effect;
     Node* rfalse;
     {
-      // Convert {receiver} using the ToObjectStub.
+      // Convert {receiver} using the ToObjectStub. The call does not require a
+      // frame-state in this case, because neither null nor undefined is passed.
       Callable callable = CodeFactory::ToObject(isolate());
       CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
           isolate(), graph()->zone(), callable.descriptor(), 0,
-          CallDescriptor::kNeedsFrameState, node->op()->properties());
+          CallDescriptor::kNoFlags, node->op()->properties());
       rfalse = efalse = graph()->NewNode(
           common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-          receiver, context, frame_state, efalse);
+          receiver, context, efalse);
     }
 
     control = graph()->NewNode(common()->Merge(2), if_true, if_false);
@@ -1695,14 +1831,15 @@
   Node* econvert = effect;
   Node* rconvert;
   {
-    // Convert {receiver} using the ToObjectStub.
+    // Convert {receiver} using the ToObjectStub. The call does not require a
+    // frame-state in this case, because neither null nor undefined is passed.
     Callable callable = CodeFactory::ToObject(isolate());
     CallDescriptor const* const desc = Linkage::GetStubCallDescriptor(
         isolate(), graph()->zone(), callable.descriptor(), 0,
-        CallDescriptor::kNeedsFrameState, node->op()->properties());
+        CallDescriptor::kNoFlags, node->op()->properties());
     rconvert = econvert = graph()->NewNode(
         common()->Call(desc), jsgraph()->HeapConstant(callable.code()),
-        receiver, context, frame_state, econvert);
+        receiver, context, econvert);
   }
 
   // Replace {receiver} with global proxy of {context}.
@@ -1719,10 +1856,10 @@
     } else {
       Node* native_context = eglobal = graph()->NewNode(
           javascript()->LoadContext(0, Context::NATIVE_CONTEXT_INDEX, true),
-          context, context, eglobal);
+          context, eglobal);
       rglobal = eglobal = graph()->NewNode(
           javascript()->LoadContext(0, Context::GLOBAL_PROXY_INDEX, true),
-          native_context, native_context, eglobal);
+          native_context, eglobal);
     }
   }
 
@@ -1764,7 +1901,7 @@
   // The logic contained here is mirrored in Builtins::Generate_Adaptor.
   // Keep these in sync.
 
-  const bool is_construct = (node->opcode() == IrOpcode::kJSCallConstruct);
+  const bool is_construct = (node->opcode() == IrOpcode::kJSConstruct);
 
   DCHECK(Builtins::HasCppImplementation(builtin_index));
   DCHECK_EQ(0, flags & CallDescriptor::kSupportsTailCalls);
@@ -1824,9 +1961,9 @@
 
 }  // namespace
 
-Reduction JSTypedLowering::ReduceJSCallConstruct(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallConstruct, node->opcode());
-  CallConstructParameters const& p = CallConstructParametersOf(node->op());
+Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSConstruct, node->opcode());
+  ConstructParameters const& p = ConstructParametersOf(node->op());
   DCHECK_LE(2u, p.arity());
   int const arity = static_cast<int>(p.arity() - 2);
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1899,10 +2036,38 @@
   return NoChange();
 }
 
+Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCallForwardVarargs, node->opcode());
+  CallForwardVarargsParameters p = CallForwardVarargsParametersOf(node->op());
+  Node* target = NodeProperties::GetValueInput(node, 0);
+  Type* target_type = NodeProperties::GetType(target);
 
-Reduction JSTypedLowering::ReduceJSCallFunction(Node* node) {
-  DCHECK_EQ(IrOpcode::kJSCallFunction, node->opcode());
-  CallFunctionParameters const& p = CallFunctionParametersOf(node->op());
+  // Check if {target} is a JSFunction.
+  if (target_type->Is(Type::Function())) {
+    // Compute flags for the call.
+    CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
+    if (p.tail_call_mode() == TailCallMode::kAllow) {
+      flags |= CallDescriptor::kSupportsTailCalls;
+    }
+
+    // Patch {node} to an indirect call via CallFunctionForwardVarargs.
+    Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
+    node->InsertInput(graph()->zone(), 0,
+                      jsgraph()->HeapConstant(callable.code()));
+    node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(p.start_index()));
+    NodeProperties::ChangeOp(
+        node,
+        common()->Call(Linkage::GetStubCallDescriptor(
+            isolate(), graph()->zone(), callable.descriptor(), 1, flags)));
+    return Changed(node);
+  }
+
+  return NoChange();
+}
+
+Reduction JSTypedLowering::ReduceJSCall(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSCall, node->opcode());
+  CallParameters const& p = CallParametersOf(node->op());
   int const arity = static_cast<int>(p.arity() - 2);
   ConvertReceiverMode convert_mode = p.convert_mode();
   Node* target = NodeProperties::GetValueInput(node, 0);
@@ -1911,7 +2076,6 @@
   Type* receiver_type = NodeProperties::GetType(receiver);
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
-  Node* frame_state = NodeProperties::FindFrameStateBefore(node);
 
   // Try to infer receiver {convert_mode} from {receiver} type.
   if (receiver_type->Is(Type::NullOrUndefined())) {
@@ -1944,7 +2108,7 @@
         !receiver_type->Is(Type::Receiver())) {
       receiver = effect =
           graph()->NewNode(javascript()->ConvertReceiver(convert_mode),
-                           receiver, context, frame_state, effect, control);
+                           receiver, context, effect, control);
       NodeProperties::ReplaceValueInput(node, receiver, 1);
     }
 
@@ -2011,8 +2175,9 @@
   // Maybe we did at least learn something about the {receiver}.
   if (p.convert_mode() != convert_mode) {
     NodeProperties::ChangeOp(
-        node, javascript()->CallFunction(p.arity(), p.frequency(), p.feedback(),
-                                         convert_mode, p.tail_call_mode()));
+        node,
+        javascript()->Call(p.arity(), p.frequency(), p.feedback(), convert_mode,
+                           p.tail_call_mode()));
     return Changed(node);
   }
 
@@ -2031,6 +2196,18 @@
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
 
+  // We don't support lowering JSForInNext inside try blocks.
+  if (NodeProperties::IsExceptionalCall(node)) return NoChange();
+
+  // We know that the {index} is in Unsigned32 range here, otherwise executing
+  // the JSForInNext wouldn't be valid. Unfortunately, due to OSR and
+  // generators, this is not always reflected in the types, hence we might
+  // need to rename the {index} here.
+  if (!NodeProperties::GetType(index)->Is(Type::Unsigned32())) {
+    index = graph()->NewNode(common()->TypeGuard(Type::Unsigned32()), index,
+                             control);
+  }
+
   // Load the next {key} from the {cache_array}.
   Node* key = effect = graph()->NewNode(
       simplified()->LoadElement(AccessBuilder::ForFixedArrayElement()),
@@ -2085,6 +2262,28 @@
   return Changed(node);
 }
 
+Reduction JSTypedLowering::ReduceJSLoadMessage(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadMessage, node->opcode());
+  ExternalReference const ref =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+  NodeProperties::ChangeOp(
+      node, simplified()->LoadField(AccessBuilder::ForExternalTaggedValue()));
+  return Changed(node);
+}
+
+Reduction JSTypedLowering::ReduceJSStoreMessage(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreMessage, node->opcode());
+  ExternalReference const ref =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  Node* value = NodeProperties::GetValueInput(node, 0);
+  node->ReplaceInput(0, jsgraph()->ExternalConstant(ref));
+  node->ReplaceInput(1, value);
+  NodeProperties::ChangeOp(
+      node, simplified()->StoreField(AccessBuilder::ForExternalTaggedValue()));
+  return Changed(node);
+}
+
 Reduction JSTypedLowering::ReduceJSGeneratorStore(Node* node) {
   DCHECK_EQ(IrOpcode::kJSGeneratorStore, node->opcode());
   Node* generator = NodeProperties::GetValueInput(node, 0);
@@ -2095,7 +2294,7 @@
   Node* control = NodeProperties::GetControlInput(node);
   int register_count = OpParameter<int>(node);
 
-  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
   FieldAccess context_field = AccessBuilder::ForJSGeneratorObjectContext();
   FieldAccess continuation_field =
       AccessBuilder::ForJSGeneratorObjectContinuation();
@@ -2149,7 +2348,7 @@
   Node* control = NodeProperties::GetControlInput(node);
   int index = OpParameter<int>(node);
 
-  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectOperandStack();
+  FieldAccess array_field = AccessBuilder::ForJSGeneratorObjectRegisterFile();
   FieldAccess element_field = AccessBuilder::ForFixedArraySlot(index);
 
   Node* array = effect = graph()->NewNode(simplified()->LoadField(array_field),
@@ -2229,18 +2428,33 @@
       return ReduceJSStoreModule(node);
     case IrOpcode::kJSConvertReceiver:
       return ReduceJSConvertReceiver(node);
-    case IrOpcode::kJSCallConstruct:
-      return ReduceJSCallConstruct(node);
-    case IrOpcode::kJSCallFunction:
-      return ReduceJSCallFunction(node);
+    case IrOpcode::kJSConstruct:
+      return ReduceJSConstruct(node);
+    case IrOpcode::kJSCallForwardVarargs:
+      return ReduceJSCallForwardVarargs(node);
+    case IrOpcode::kJSCall:
+      return ReduceJSCall(node);
     case IrOpcode::kJSForInNext:
       return ReduceJSForInNext(node);
+    case IrOpcode::kJSLoadMessage:
+      return ReduceJSLoadMessage(node);
+    case IrOpcode::kJSStoreMessage:
+      return ReduceJSStoreMessage(node);
     case IrOpcode::kJSGeneratorStore:
       return ReduceJSGeneratorStore(node);
     case IrOpcode::kJSGeneratorRestoreContinuation:
       return ReduceJSGeneratorRestoreContinuation(node);
     case IrOpcode::kJSGeneratorRestoreRegister:
       return ReduceJSGeneratorRestoreRegister(node);
+    // TODO(mstarzinger): Simplified operations hiding in a JS-level reducer
+    // are not fooling anyone. Consider moving this into a separate reducer.
+    case IrOpcode::kSpeculativeNumberAdd:
+      return ReduceSpeculativeNumberAdd(node);
+    case IrOpcode::kSpeculativeNumberSubtract:
+    case IrOpcode::kSpeculativeNumberMultiply:
+    case IrOpcode::kSpeculativeNumberDivide:
+    case IrOpcode::kSpeculativeNumberModulus:
+      return ReduceSpeculativeNumberBinop(node);
     default:
       break;
   }
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
index 3e71022..35195ec 100644
--- a/src/compiler/js-typed-lowering.h
+++ b/src/compiler/js-typed-lowering.h
@@ -70,9 +70,12 @@
   Reduction ReduceJSToString(Node* node);
   Reduction ReduceJSToObject(Node* node);
   Reduction ReduceJSConvertReceiver(Node* node);
-  Reduction ReduceJSCallConstruct(Node* node);
-  Reduction ReduceJSCallFunction(Node* node);
+  Reduction ReduceJSConstruct(Node* node);
+  Reduction ReduceJSCallForwardVarargs(Node* node);
+  Reduction ReduceJSCall(Node* node);
   Reduction ReduceJSForInNext(Node* node);
+  Reduction ReduceJSLoadMessage(Node* node);
+  Reduction ReduceJSStoreMessage(Node* node);
   Reduction ReduceJSGeneratorStore(Node* node);
   Reduction ReduceJSGeneratorRestoreContinuation(Node* node);
   Reduction ReduceJSGeneratorRestoreRegister(Node* node);
@@ -81,6 +84,8 @@
   Reduction ReduceInt32Binop(Node* node);
   Reduction ReduceUI32Shift(Node* node, Signedness signedness);
   Reduction ReduceCreateConsString(Node* node);
+  Reduction ReduceSpeculativeNumberAdd(Node* node);
+  Reduction ReduceSpeculativeNumberBinop(Node* node);
 
   Factory* factory() const;
   Graph* graph() const;
@@ -96,7 +101,7 @@
   Flags flags_;
   JSGraph* jsgraph_;
   Type* shifted_int32_ranges_[4];
-  Type* const the_hole_type_;
+  Type* pointer_comparable_type_;
   TypeCache const& type_cache_;
 };
 
diff --git a/src/compiler/jump-threading.cc b/src/compiler/jump-threading.cc
index d7d4f91..86d25de 100644
--- a/src/compiler/jump-threading.cc
+++ b/src/compiler/jump-threading.cc
@@ -4,6 +4,7 @@
 
 #include "src/compiler/jump-threading.h"
 #include "src/compiler/code-generator-impl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index 971ea72..06f967a 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -5,7 +5,6 @@
 #include "src/compiler/linkage.h"
 
 #include "src/ast/scopes.h"
-#include "src/builtins/builtins-utils.h"
 #include "src/code-stubs.h"
 #include "src/compilation-info.h"
 #include "src/compiler/common-operator.h"
@@ -13,6 +12,7 @@
 #include "src/compiler/node.h"
 #include "src/compiler/osr.h"
 #include "src/compiler/pipeline.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -53,8 +53,7 @@
 MachineSignature* CallDescriptor::GetMachineSignature(Zone* zone) const {
   size_t param_count = ParameterCount();
   size_t return_count = ReturnCount();
-  MachineType* types = reinterpret_cast<MachineType*>(
-      zone->New(sizeof(MachineType*) * (param_count + return_count)));
+  MachineType* types = zone->NewArray<MachineType>(param_count + return_count);
   int current = 0;
   for (size_t i = 0; i < return_count; ++i) {
     types[current++] = GetReturnType(i);
@@ -143,16 +142,15 @@
 bool Linkage::NeedsFrameStateInput(Runtime::FunctionId function) {
   switch (function) {
     // Most runtime functions need a FrameState. A few chosen ones that we know
-    // not to call into arbitrary JavaScript, not to throw, and not to
-    // deoptimize
-    // are whitelisted here and can be called without a FrameState.
+    // not to call into arbitrary JavaScript, not to throw, and not to lazily
+    // deoptimize are whitelisted here and can be called without a FrameState.
     case Runtime::kAbort:
     case Runtime::kAllocateInTargetSpace:
+    case Runtime::kConvertReceiver:
     case Runtime::kCreateIterResultObject:
     case Runtime::kDefineGetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kDefineSetterPropertyUnchecked:  // TODO(jarin): Is it safe?
     case Runtime::kGeneratorGetContinuation:
-    case Runtime::kGetSuperConstructor:
     case Runtime::kIsFunction:
     case Runtime::kNewClosure:
     case Runtime::kNewClosure_Tenured:
@@ -173,13 +171,13 @@
       return false;
 
     // Some inline intrinsics are also safe to call without a FrameState.
+    case Runtime::kInlineClassOf:
     case Runtime::kInlineCreateIterResultObject:
     case Runtime::kInlineFixedArrayGet:
     case Runtime::kInlineFixedArraySet:
     case Runtime::kInlineGeneratorClose:
     case Runtime::kInlineGeneratorGetInputOrDebugPos:
     case Runtime::kInlineGeneratorGetResumeMode:
-    case Runtime::kInlineGetSuperConstructor:
     case Runtime::kInlineIsArray:
     case Runtime::kInlineIsJSReceiver:
     case Runtime::kInlineIsRegExp:
diff --git a/src/compiler/load-elimination.cc b/src/compiler/load-elimination.cc
index e50ebe1..10140e1 100644
--- a/src/compiler/load-elimination.cc
+++ b/src/compiler/load-elimination.cc
@@ -8,6 +8,8 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/simplified-operator.h"
+#include "src/factory.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -320,6 +322,42 @@
   }
 }
 
+bool LoadElimination::AbstractMaps::Lookup(
+    Node* object, ZoneHandleSet<Map>* object_maps) const {
+  for (auto pair : info_for_node_) {
+    if (MustAlias(object, pair.first)) {
+      *object_maps = pair.second;
+      return true;
+    }
+  }
+  return false;
+}
+
+LoadElimination::AbstractMaps const* LoadElimination::AbstractMaps::Kill(
+    Node* object, Zone* zone) const {
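+  // Copy-on-write: a new AbstractMaps is only allocated if some tracked
+  // object may actually alias {object}; otherwise this state is reused.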
+  for (auto pair : this->info_for_node_) {
+    if (MayAlias(object, pair.first)) {
+      AbstractMaps* that = new (zone) AbstractMaps(zone);
+      for (auto pair : this->info_for_node_) {
+        if (!MayAlias(object, pair.first)) that->info_for_node_.insert(pair);
+      }
+      return that;
+    }
+  }
+  return this;
+}
+
+void LoadElimination::AbstractMaps::Print() const {
+  for (auto pair : info_for_node_) {
+    PrintF("    #%d:%s\n", pair.first->id(), pair.first->op()->mnemonic());
+    OFStream os(stdout);
+    ZoneHandleSet<Map> const& maps = pair.second;
+    for (size_t i = 0; i < maps.size(); ++i) {
+      os << "     - " << Brief(*maps[i]) << "\n";
+    }
+  }
+}
+
 bool LoadElimination::AbstractState::Equals(AbstractState const* that) const {
   if (this->checks_) {
     if (!that->checks_ || !that->checks_->Equals(this->checks_)) {
@@ -344,6 +382,13 @@
       return false;
     }
   }
+  if (this->maps_) {
+    if (!that->maps_ || !that->maps_->Equals(this->maps_)) {
+      return false;
+    }
+  } else if (that->maps_) {
+    return false;
+  }
   return true;
 }
 
@@ -372,6 +417,11 @@
       }
     }
   }
+
+  // Merge the information we have about the maps.
+  if (this->maps_) {
+    this->maps_ = that->maps_ ? that->maps_->Merge(this->maps_, zone) : nullptr;
+  }
 }
 
 Node* LoadElimination::AbstractState::LookupCheck(Node* node) const {
@@ -389,6 +439,35 @@
   return that;
 }
 
+bool LoadElimination::AbstractState::LookupMaps(
+    Node* object, ZoneHandleSet<Map>* object_map) const {
+  return this->maps_ && this->maps_->Lookup(object, object_map);
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::AddMaps(
+    Node* object, ZoneHandleSet<Map> maps, Zone* zone) const {
+  AbstractState* that = new (zone) AbstractState(*this);
+  if (that->maps_) {
+    that->maps_ = that->maps_->Extend(object, maps, zone);
+  } else {
+    that->maps_ = new (zone) AbstractMaps(object, maps, zone);
+  }
+  return that;
+}
+
+LoadElimination::AbstractState const* LoadElimination::AbstractState::KillMaps(
+    Node* object, Zone* zone) const {
+  if (this->maps_) {
+    AbstractMaps const* that_maps = this->maps_->Kill(object, zone);
+    if (this->maps_ != that_maps) {
+      AbstractState* that = new (zone) AbstractState(*this);
+      that->maps_ = that_maps;
+      return that;
+    }
+  }
+  return this;
+}
+
 Node* LoadElimination::AbstractState::LookupElement(Node* object,
                                                     Node* index) const {
   if (this->elements_) {
@@ -456,7 +535,7 @@
       AbstractField const* that_field = this_field->Kill(object, zone);
       if (that_field != this_field) {
         AbstractState* that = new (zone) AbstractState(*this);
-        that->fields_[i] = this_field;
+        that->fields_[i] = that_field;
         while (++i < arraysize(fields_)) {
           if (this->fields_[i] != nullptr) {
             that->fields_[i] = this->fields_[i]->Kill(object, zone);
@@ -481,6 +560,10 @@
     PrintF("   checks:\n");
     checks_->Print();
   }
+  if (maps_) {
+    PrintF("   maps:\n");
+    maps_->Print();
+  }
   if (elements_) {
     PrintF("   elements:\n");
     elements_->Print();
@@ -520,23 +603,18 @@
 }
 
 Reduction LoadElimination::ReduceCheckMaps(Node* node) {
+  ZoneHandleSet<Map> const maps = CheckMapsParametersOf(node->op()).maps();
   Node* const object = NodeProperties::GetValueInput(node, 0);
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int const map_input_count = node->op()->ValueInputCount() - 1;
-  if (Node* const object_map =
-          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
-    for (int i = 0; i < map_input_count; ++i) {
-      Node* map = NodeProperties::GetValueInput(node, 1 + i);
-      if (map == object_map) return Replace(effect);
-    }
+  ZoneHandleSet<Map> object_maps;
+  if (state->LookupMaps(object, &object_maps)) {
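+    // The CheckMaps is redundant if the maps known for {object} are a
+    // subset of the maps being checked here.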
+    if (maps.contains(object_maps)) return Replace(effect);
+    state = state->KillMaps(object, zone());
+    // TODO(turbofan): Compute the intersection.
   }
-  if (map_input_count == 1) {
-    Node* const map0 = NodeProperties::GetValueInput(node, 1);
-    state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset), map0,
-                            zone());
-  }
+  state = state->AddMaps(object, maps, zone());
   return UpdateState(node, state);
 }
 
@@ -546,18 +624,16 @@
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-  if (Node* const elements_map =
-          state->LookupField(elements, FieldIndexOf(HeapObject::kMapOffset))) {
-    // Check if the {elements} already have the fixed array map.
+  // Check if the {elements} already have the fixed array map.
-    if (elements_map == fixed_array_map) {
-      ReplaceWithValue(node, elements, effect);
-      return Replace(elements);
-    }
+  ZoneHandleSet<Map> elements_maps;
+  ZoneHandleSet<Map> fixed_array_maps(factory()->fixed_array_map());
+  if (state->LookupMaps(elements, &elements_maps) &&
+      fixed_array_maps.contains(elements_maps)) {
+    ReplaceWithValue(node, elements, effect);
+    return Replace(elements);
   }
   // We know that the resulting elements have the fixed array map.
-  state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                          fixed_array_map, zone());
+  state = state->AddMaps(node, fixed_array_maps, zone());
   // Kill the previous elements on {object}.
   state =
       state->KillField(object, FieldIndexOf(JSObject::kElementsOffset), zone());
@@ -575,14 +651,12 @@
   if (state == nullptr) return NoChange();
   if (flags & GrowFastElementsFlag::kDoubleElements) {
     // We know that the resulting elements have the fixed double array map.
-    Node* fixed_double_array_map = jsgraph()->FixedDoubleArrayMapConstant();
-    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                            fixed_double_array_map, zone());
+    state = state->AddMaps(
+        node, ZoneHandleSet<Map>(factory()->fixed_double_array_map()), zone());
   } else {
     // We know that the resulting elements have the fixed array map.
-    Node* fixed_array_map = jsgraph()->FixedArrayMapConstant();
-    state = state->AddField(node, FieldIndexOf(HeapObject::kMapOffset),
-                            fixed_array_map, zone());
+    state = state->AddMaps(
+        node, ZoneHandleSet<Map>(factory()->fixed_array_map()), zone());
   }
   if (flags & GrowFastElementsFlag::kArrayObject) {
     // Kill the previous Array::length on {object}.
@@ -599,31 +673,30 @@
 }
 
 Reduction LoadElimination::ReduceTransitionElementsKind(Node* node) {
+  ElementsTransition transition = ElementsTransitionOf(node->op());
   Node* const object = NodeProperties::GetValueInput(node, 0);
-  Node* const source_map = NodeProperties::GetValueInput(node, 1);
-  Node* const target_map = NodeProperties::GetValueInput(node, 2);
+  Handle<Map> source_map(transition.source());
+  Handle<Map> target_map(transition.target());
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  if (Node* const object_map =
-          state->LookupField(object, FieldIndexOf(HeapObject::kMapOffset))) {
-    if (target_map == object_map) {
+  ZoneHandleSet<Map> object_maps;
+  if (state->LookupMaps(object, &object_maps)) {
+    if (ZoneHandleSet<Map>(target_map).contains(object_maps)) {
       // The {object} already has the {target_map}, so this TransitionElements
       // {node} is fully redundant (independent of what {source_map} is).
       return Replace(effect);
     }
-    state =
-        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
-    if (source_map == object_map) {
-      state = state->AddField(object, FieldIndexOf(HeapObject::kMapOffset),
-                              target_map, zone());
+    if (object_maps.contains(ZoneHandleSet<Map>(source_map))) {
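+      // An object that currently has {source_map} will have {target_map}
+      // after the transition, while objects with any other recorded map
+      // keep their map.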
+      object_maps.remove(source_map, zone());
+      object_maps.insert(target_map, zone());
+      state = state->KillMaps(object, zone());
+      state = state->AddMaps(object, object_maps, zone());
     }
   } else {
-    state =
-        state->KillField(object, FieldIndexOf(HeapObject::kMapOffset), zone());
+    state = state->KillMaps(object, zone());
   }
-  ElementsTransition transition = ElementsTransitionOf(node->op());
-  switch (transition) {
+  switch (transition.mode()) {
     case ElementsTransition::kFastTransition:
       break;
     case ElementsTransition::kSlowTransition:
@@ -642,23 +715,40 @@
   Node* const control = NodeProperties::GetControlInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int field_index = FieldIndexOf(access);
-  if (field_index >= 0) {
-    if (Node* replacement = state->LookupField(object, field_index)) {
-      // Make sure we don't resurrect dead {replacement} nodes.
-      if (!replacement->IsDead()) {
-        // We might need to guard the {replacement} if the type of the
-        // {node} is more precise than the type of the {replacement}.
-        Type* const node_type = NodeProperties::GetType(node);
-        if (!NodeProperties::GetType(replacement)->Is(node_type)) {
-          replacement = graph()->NewNode(common()->TypeGuard(node_type),
-                                         replacement, control);
-        }
-        ReplaceWithValue(node, replacement, effect);
-        return Replace(replacement);
-      }
+  if (access.offset == HeapObject::kMapOffset &&
+      access.base_is_tagged == kTaggedBase) {
+    DCHECK(IsAnyTagged(access.machine_type.representation()));
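+    // Map loads are answered from the abstract maps state; when exactly one
+    // map is known for {object}, the load constant-folds to that map.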
+    ZoneHandleSet<Map> object_maps;
+    if (state->LookupMaps(object, &object_maps) && object_maps.size() == 1) {
+      Node* value = jsgraph()->HeapConstant(object_maps[0]);
+      NodeProperties::SetType(value, Type::OtherInternal());
+      ReplaceWithValue(node, value, effect);
+      return Replace(value);
     }
-    state = state->AddField(object, field_index, node, zone());
+  } else {
+    int field_index = FieldIndexOf(access);
+    if (field_index >= 0) {
+      if (Node* replacement = state->LookupField(object, field_index)) {
+        // Make sure we don't resurrect dead {replacement} nodes.
+        if (!replacement->IsDead()) {
+          // We might need to guard the {replacement} if the type of the
+          // {node} is more precise than the type of the {replacement}.
+          Type* const node_type = NodeProperties::GetType(node);
+          if (!NodeProperties::GetType(replacement)->Is(node_type)) {
+            replacement = graph()->NewNode(common()->TypeGuard(node_type),
+                                           replacement, control);
+            NodeProperties::SetType(replacement, node_type);
+          }
+          ReplaceWithValue(node, replacement, effect);
+          return Replace(replacement);
+        }
+      }
+      state = state->AddField(object, field_index, node, zone());
+    }
+  }
+  Handle<Map> field_map;
+  if (access.map.ToHandle(&field_map)) {
+    state = state->AddMaps(node, ZoneHandleSet<Map>(field_map), zone());
   }
   return UpdateState(node, state);
 }
@@ -670,19 +760,33 @@
   Node* const effect = NodeProperties::GetEffectInput(node);
   AbstractState const* state = node_states_.Get(effect);
   if (state == nullptr) return NoChange();
-  int field_index = FieldIndexOf(access);
-  if (field_index >= 0) {
-    Node* const old_value = state->LookupField(object, field_index);
-    if (old_value == new_value) {
-      // This store is fully redundant.
-      return Replace(effect);
+  if (access.offset == HeapObject::kMapOffset &&
+      access.base_is_tagged == kTaggedBase) {
+    DCHECK(IsAnyTagged(access.machine_type.representation()));
+    // Kill all potential knowledge about the {object}s map.
+    state = state->KillMaps(object, zone());
+    Type* const new_value_type = NodeProperties::GetType(new_value);
+    if (new_value_type->IsHeapConstant()) {
+      // Record the new {object} map information.
+      ZoneHandleSet<Map> object_maps(
+          Handle<Map>::cast(new_value_type->AsHeapConstant()->Value()));
+      state = state->AddMaps(object, object_maps, zone());
     }
-    // Kill all potentially aliasing fields and record the new value.
-    state = state->KillField(object, field_index, zone());
-    state = state->AddField(object, field_index, new_value, zone());
   } else {
-    // Unsupported StoreField operator.
-    state = state->KillFields(object, zone());
+    int field_index = FieldIndexOf(access);
+    if (field_index >= 0) {
+      Node* const old_value = state->LookupField(object, field_index);
+      if (old_value == new_value) {
+        // This store is fully redundant.
+        return Replace(effect);
+      }
+      // Kill all potentially aliasing fields and record the new value.
+      state = state->KillField(object, field_index, zone());
+      state = state->AddField(object, field_index, new_value, zone());
+    } else {
+      // Unsupported StoreField operator.
+      state = state->KillFields(object, zone());
+    }
   }
   return UpdateState(node, state);
 }
@@ -703,6 +807,7 @@
       if (!NodeProperties::GetType(replacement)->Is(node_type)) {
         replacement = graph()->NewNode(common()->TypeGuard(node_type),
                                        replacement, control);
+        NodeProperties::SetType(replacement, node_type);
       }
       ReplaceWithValue(node, replacement, effect);
       return Replace(replacement);
@@ -730,6 +835,9 @@
   // Only record the new value if the store doesn't have an implicit truncation.
   switch (access.machine_type.representation()) {
     case MachineRepresentation::kNone:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kBit:
       UNREACHABLE();
       break;
@@ -865,21 +973,31 @@
             break;
           }
           case IrOpcode::kTransitionElementsKind: {
+            ElementsTransition transition = ElementsTransitionOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            state = state->KillField(
-                object, FieldIndexOf(HeapObject::kMapOffset), zone());
-            state = state->KillField(
-                object, FieldIndexOf(JSObject::kElementsOffset), zone());
+            ZoneHandleSet<Map> object_maps;
+            if (!state->LookupMaps(object, &object_maps) ||
+                !ZoneHandleSet<Map>(transition.target())
+                     .contains(object_maps)) {
+              state = state->KillMaps(object, zone());
+              state = state->KillField(
+                  object, FieldIndexOf(JSObject::kElementsOffset), zone());
+            }
             break;
           }
           case IrOpcode::kStoreField: {
             FieldAccess const& access = FieldAccessOf(current->op());
             Node* const object = NodeProperties::GetValueInput(current, 0);
-            int field_index = FieldIndexOf(access);
-            if (field_index < 0) {
-              state = state->KillFields(object, zone());
+            if (access.offset == HeapObject::kMapOffset) {
+              // Invalidate what we know about the {object}s map.
+              state = state->KillMaps(object, zone());
             } else {
-              state = state->KillField(object, field_index, zone());
+              int field_index = FieldIndexOf(access);
+              if (field_index < 0) {
+                state = state->KillFields(object, zone());
+              } else {
+                state = state->KillField(object, field_index, zone());
+              }
             }
             break;
           }
@@ -911,7 +1029,8 @@
   DCHECK_EQ(0, offset % kPointerSize);
   int field_index = offset / kPointerSize;
   if (field_index >= static_cast<int>(kMaxTrackedFields)) return -1;
-  return field_index;
+  DCHECK_LT(0, field_index);
+  return field_index - 1;
 }
 
 // static
@@ -921,6 +1040,9 @@
     case MachineRepresentation::kNone:
     case MachineRepresentation::kBit:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
       UNREACHABLE();
       break;
     case MachineRepresentation::kWord32:
@@ -957,6 +1079,8 @@
 
 Graph* LoadElimination::graph() const { return jsgraph()->graph(); }
 
+Factory* LoadElimination::factory() const { return jsgraph()->factory(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/load-elimination.h b/src/compiler/load-elimination.h
index 50979e4..cd486a2 100644
--- a/src/compiler/load-elimination.h
+++ b/src/compiler/load-elimination.h
@@ -8,9 +8,14 @@
 #include "src/base/compiler-specific.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class Factory;
+
 namespace compiler {
 
 // Forward declarations.
@@ -152,6 +157,49 @@
 
   static size_t const kMaxTrackedFields = 32;
 
+  // Abstract state to approximate the current map of an object along the
+  // effect paths through the graph.
+  class AbstractMaps final : public ZoneObject {
+   public:
+    explicit AbstractMaps(Zone* zone) : info_for_node_(zone) {}
+    AbstractMaps(Node* object, ZoneHandleSet<Map> maps, Zone* zone)
+        : info_for_node_(zone) {
+      info_for_node_.insert(std::make_pair(object, maps));
+    }
+
+    AbstractMaps const* Extend(Node* object, ZoneHandleSet<Map> maps,
+                               Zone* zone) const {
+      AbstractMaps* that = new (zone) AbstractMaps(zone);
+      that->info_for_node_ = this->info_for_node_;
+      that->info_for_node_.insert(std::make_pair(object, maps));
+      return that;
+    }
+    bool Lookup(Node* object, ZoneHandleSet<Map>* object_maps) const;
+    AbstractMaps const* Kill(Node* object, Zone* zone) const;
+    bool Equals(AbstractMaps const* that) const {
+      return this == that || this->info_for_node_ == that->info_for_node_;
+    }
+    AbstractMaps const* Merge(AbstractMaps const* that, Zone* zone) const {
+      if (this->Equals(that)) return this;
+      AbstractMaps* copy = new (zone) AbstractMaps(zone);
+      for (auto this_it : this->info_for_node_) {
+        Node* this_object = this_it.first;
+        ZoneHandleSet<Map> this_maps = this_it.second;
+        auto that_it = that->info_for_node_.find(this_object);
+        if (that_it != that->info_for_node_.end() &&
+            that_it->second == this_maps) {
+          copy->info_for_node_.insert(this_it);
+        }
+      }
+      return copy;
+    }
+
+    void Print() const;
+
+   private:
+    ZoneMap<Node*, ZoneHandleSet<Map>> info_for_node_;
+  };
+
   class AbstractState final : public ZoneObject {
    public:
     AbstractState() {
@@ -163,6 +211,11 @@
     bool Equals(AbstractState const* that) const;
     void Merge(AbstractState const* that, Zone* zone);
 
+    AbstractState const* AddMaps(Node* object, ZoneHandleSet<Map> maps,
+                                 Zone* zone) const;
+    AbstractState const* KillMaps(Node* object, Zone* zone) const;
+    bool LookupMaps(Node* object, ZoneHandleSet<Map>* object_maps) const;
+
     AbstractState const* AddField(Node* object, size_t index, Node* value,
                                   Zone* zone) const;
     AbstractState const* KillField(Node* object, size_t index,
@@ -185,6 +238,7 @@
     AbstractChecks const* checks_ = nullptr;
     AbstractElements const* elements_ = nullptr;
     AbstractField const* fields_[kMaxTrackedFields];
+    AbstractMaps const* maps_ = nullptr;
   };
 
   class AbstractStateForEffectNodes final : public ZoneObject {
@@ -223,6 +277,7 @@
 
   CommonOperatorBuilder* common() const;
   AbstractState const* empty_state() const { return &empty_state_; }
+  Factory* factory() const;
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
   Zone* zone() const { return node_states_.zone(); }
diff --git a/src/compiler/loop-variable-optimizer.cc b/src/compiler/loop-variable-optimizer.cc
index 55cce26..9bade27 100644
--- a/src/compiler/loop-variable-optimizer.cc
+++ b/src/compiler/loop-variable-optimizer.cc
@@ -303,9 +303,11 @@
   Node* initial = phi->InputAt(0);
   Node* arith = phi->InputAt(1);
   InductionVariable::ArithmeticType arithmeticType;
-  if (arith->opcode() == IrOpcode::kJSAdd) {
+  if (arith->opcode() == IrOpcode::kJSAdd ||
+      arith->opcode() == IrOpcode::kSpeculativeNumberAdd) {
     arithmeticType = InductionVariable::ArithmeticType::kAddition;
-  } else if (arith->opcode() == IrOpcode::kJSSubtract) {
+  } else if (arith->opcode() == IrOpcode::kJSSubtract ||
+             arith->opcode() == IrOpcode::kSpeculativeNumberSubtract) {
     arithmeticType = InductionVariable::ArithmeticType::kSubtraction;
   } else {
     return nullptr;
diff --git a/src/compiler/machine-graph-verifier.cc b/src/compiler/machine-graph-verifier.cc
index a8f7a25..2d5fce5 100644
--- a/src/compiler/machine-graph-verifier.cc
+++ b/src/compiler/machine-graph-verifier.cc
@@ -30,6 +30,10 @@
     Run();
   }
 
+  CallDescriptor* call_descriptor() const {
+    return linkage_->GetIncomingDescriptor();
+  }
+
   MachineRepresentation GetRepresentation(Node const* node) const {
     return representation_vector_.at(node->id());
   }
@@ -66,6 +70,18 @@
     }
   }
 
+  MachineRepresentation PromoteRepresentation(MachineRepresentation rep) {
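+    // Sub-word integer values are loaded and stored as full 32-bit words,
+    // so narrow representations are widened to kWord32 for checking.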
+    switch (rep) {
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord32:
+        return MachineRepresentation::kWord32;
+      default:
+        break;
+    }
+    return rep;
+  }
+
   void Run() {
     auto blocks = schedule_->all_blocks();
     for (BasicBlock* block : *blocks) {
@@ -82,6 +98,11 @@
                 linkage_->GetParameterType(ParameterIndexOf(node->op()))
                     .representation();
             break;
+          case IrOpcode::kReturn: {
+            representation_vector_[node->id()] = PromoteRepresentation(
+                linkage_->GetReturnType().representation());
+            break;
+          }
           case IrOpcode::kProjection: {
             representation_vector_[node->id()] = GetProjectionType(node);
           } break;
@@ -91,12 +112,12 @@
           case IrOpcode::kAtomicLoad:
           case IrOpcode::kLoad:
           case IrOpcode::kProtectedLoad:
-            representation_vector_[node->id()] =
-                LoadRepresentationOf(node->op()).representation();
+            representation_vector_[node->id()] = PromoteRepresentation(
+                LoadRepresentationOf(node->op()).representation());
             break;
           case IrOpcode::kCheckedLoad:
-            representation_vector_[node->id()] =
-                CheckedLoadRepresentationOf(node->op()).representation();
+            representation_vector_[node->id()] = PromoteRepresentation(
+                CheckedLoadRepresentationOf(node->op()).representation());
             break;
           case IrOpcode::kLoadStackPointer:
           case IrOpcode::kLoadFramePointer:
@@ -104,6 +125,10 @@
             representation_vector_[node->id()] =
                 MachineType::PointerRepresentation();
             break;
+          case IrOpcode::kUnalignedLoad:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                UnalignedLoadRepresentationOf(node->op()).representation());
+            break;
           case IrOpcode::kPhi:
             representation_vector_[node->id()] =
                 PhiRepresentationOf(node->op());
@@ -119,9 +144,22 @@
             }
             break;
           }
-          case IrOpcode::kUnalignedLoad:
+          case IrOpcode::kAtomicStore:
             representation_vector_[node->id()] =
-                UnalignedLoadRepresentationOf(node->op()).representation();
+                PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+            break;
+          case IrOpcode::kStore:
+          case IrOpcode::kProtectedStore:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                StoreRepresentationOf(node->op()).representation());
+            break;
+          case IrOpcode::kCheckedStore:
+            representation_vector_[node->id()] =
+                PromoteRepresentation(CheckedStoreRepresentationOf(node->op()));
+            break;
+          case IrOpcode::kUnalignedStore:
+            representation_vector_[node->id()] = PromoteRepresentation(
+                UnalignedStoreRepresentationOf(node->op()));
             break;
           case IrOpcode::kHeapConstant:
           case IrOpcode::kNumberConstant:
@@ -170,6 +208,8 @@
           case IrOpcode::kTruncateFloat32ToUint32:
           case IrOpcode::kBitcastFloat32ToInt32:
           case IrOpcode::kInt32x4ExtractLane:
+          case IrOpcode::kInt16x8ExtractLane:
+          case IrOpcode::kInt8x16ExtractLane:
           case IrOpcode::kInt32Constant:
           case IrOpcode::kRelocatableInt32Constant:
           case IrOpcode::kTruncateFloat64ToWord32:
@@ -237,8 +277,12 @@
  public:
   MachineRepresentationChecker(
       Schedule const* const schedule,
-      MachineRepresentationInferrer const* const inferrer)
-      : schedule_(schedule), inferrer_(inferrer) {}
+      MachineRepresentationInferrer const* const inferrer, bool is_stub,
+      const char* name)
+      : schedule_(schedule),
+        inferrer_(inferrer),
+        is_stub_(is_stub),
+        name_(name) {}
 
   void Run() {
     BasicBlockVector const* blocks = schedule_->all_blocks();
@@ -290,9 +334,17 @@
             CheckValueInputForFloat64Op(node, 0);
             break;
           case IrOpcode::kWord64Equal:
-            CheckValueInputIsTaggedOrPointer(node, 0);
-            CheckValueInputRepresentationIs(
-                node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+            if (Is64()) {
+              CheckValueInputIsTaggedOrPointer(node, 0);
+              CheckValueInputIsTaggedOrPointer(node, 1);
+              if (!is_stub_) {
+                CheckValueInputRepresentationIs(
+                    node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+              }
+            } else {
+              CheckValueInputForInt64Op(node, 0);
+              CheckValueInputForInt64Op(node, 1);
+            }
             break;
           case IrOpcode::kInt64LessThan:
           case IrOpcode::kInt64LessThanOrEqual:
@@ -302,6 +354,8 @@
             CheckValueInputForInt64Op(node, 1);
             break;
           case IrOpcode::kInt32x4ExtractLane:
+          case IrOpcode::kInt16x8ExtractLane:
+          case IrOpcode::kInt8x16ExtractLane:
             CheckValueInputRepresentationIs(node, 0,
                                             MachineRepresentation::kSimd128);
             break;
@@ -317,6 +371,19 @@
             MACHINE_UNOP_32_LIST(LABEL) { CheckValueInputForInt32Op(node, 0); }
             break;
           case IrOpcode::kWord32Equal:
+            if (Is32()) {
+              CheckValueInputIsTaggedOrPointer(node, 0);
+              CheckValueInputIsTaggedOrPointer(node, 1);
+              if (!is_stub_) {
+                CheckValueInputRepresentationIs(
+                    node, 1, inferrer_->GetRepresentation(node->InputAt(0)));
+              }
+            } else {
+              CheckValueInputForInt32Op(node, 0);
+              CheckValueInputForInt32Op(node, 1);
+            }
+            break;
+
           case IrOpcode::kInt32LessThan:
           case IrOpcode::kInt32LessThanOrEqual:
           case IrOpcode::kUint32LessThan:
@@ -374,7 +441,7 @@
             CheckValueInputIsTaggedOrPointer(node, 0);
             CheckValueInputRepresentationIs(
                 node, 1, MachineType::PointerRepresentation());
-            switch (StoreRepresentationOf(node->op()).representation()) {
+            switch (inferrer_->GetRepresentation(node)) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
               case MachineRepresentation::kTaggedSigned:
@@ -382,15 +449,14 @@
                 break;
               default:
                 CheckValueInputRepresentationIs(
-                    node, 2,
-                    StoreRepresentationOf(node->op()).representation());
+                    node, 2, inferrer_->GetRepresentation(node));
             }
             break;
           case IrOpcode::kAtomicStore:
             CheckValueInputIsTaggedOrPointer(node, 0);
             CheckValueInputRepresentationIs(
                 node, 1, MachineType::PointerRepresentation());
-            switch (AtomicStoreRepresentationOf(node->op())) {
+            switch (inferrer_->GetRepresentation(node)) {
               case MachineRepresentation::kTagged:
               case MachineRepresentation::kTaggedPointer:
               case MachineRepresentation::kTaggedSigned:
@@ -398,7 +464,7 @@
                 break;
               default:
                 CheckValueInputRepresentationIs(
-                    node, 2, AtomicStoreRepresentationOf(node->op()));
+                    node, 2, inferrer_->GetRepresentation(node));
             }
             break;
           case IrOpcode::kPhi:
@@ -410,6 +476,11 @@
                   CheckValueInputIsTagged(node, i);
                 }
                 break;
+              case MachineRepresentation::kWord32:
+                for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
+                  CheckValueInputForInt32Op(node, i);
+                }
+                break;
               default:
                 for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
                   CheckValueInputRepresentationIs(
@@ -422,9 +493,35 @@
           case IrOpcode::kSwitch:
             CheckValueInputForInt32Op(node, 0);
             break;
-          case IrOpcode::kReturn:
-            // TODO(epertoso): use the linkage to determine which tipe we
-            // should have here.
+          case IrOpcode::kReturn: {
+            // TODO(ishell): enable once the pop count parameter type becomes
+            // MachineType::PointerRepresentation(). Currently it's int32 or
+            // word-size.
+            // CheckValueInputRepresentationIs(
+            //     node, 0, MachineType::PointerRepresentation());  // Pop count
+            size_t return_count = inferrer_->call_descriptor()->ReturnCount();
+            for (size_t i = 0; i < return_count; i++) {
+              MachineType type = inferrer_->call_descriptor()->GetReturnType(i);
+              int input_index = static_cast<int>(i + 1);
+              switch (type.representation()) {
+                case MachineRepresentation::kTagged:
+                case MachineRepresentation::kTaggedPointer:
+                case MachineRepresentation::kTaggedSigned:
+                  CheckValueInputIsTagged(node, input_index);
+                  break;
+                case MachineRepresentation::kWord32:
+                  CheckValueInputForInt32Op(node, input_index);
+                  break;
+                default:
+                  CheckValueInputRepresentationIs(node, input_index,
+                                                  type.representation());
+              }
+            }
+            break;
+          }
+          case IrOpcode::kThrow:
+            CheckValueInputIsTagged(node, 0);
             break;
           case IrOpcode::kTypedStateValues:
           case IrOpcode::kFrameState:
@@ -434,6 +531,7 @@
               std::stringstream str;
               str << "Node #" << node->id() << ":" << *node->op()
                   << " in the machine graph is not being checked.";
+              PrintDebugHelp(str, node);
               FATAL(str.str().c_str());
             }
             break;
@@ -443,6 +541,15 @@
   }
 
  private:
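+  // Helpers to query the target's pointer width, used to decide whether a
+  // word-sized comparison may operate on tagged or pointer values.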
+  static bool Is32() {
+    return MachineType::PointerRepresentation() ==
+           MachineRepresentation::kWord32;
+  }
+  static bool Is64() {
+    return MachineType::PointerRepresentation() ==
+           MachineRepresentation::kWord64;
+  }
+
   void CheckValueInputRepresentationIs(Node const* node, int index,
                                        MachineRepresentation representation) {
     Node const* input = node->InputAt(index);
@@ -450,10 +557,11 @@
         inferrer_->GetRepresentation(input);
     if (input_representation != representation) {
       std::stringstream str;
-      str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
-          << MachineReprToString(input_representation) << " uses node #"
-          << input->id() << ":" << *input->op() << " which doesn't have a "
-          << MachineReprToString(representation) << " representation.";
+      str << "TypeError: node #" << node->id() << ":" << *node->op()
+          << " uses node #" << input->id() << ":" << *input->op() << ":"
+          << input_representation << " which doesn't have a " << representation
+          << " representation.";
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -472,6 +580,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a tagged representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -482,6 +591,19 @@
       case MachineRepresentation::kTaggedPointer:
       case MachineRepresentation::kTaggedSigned:
         return;
+      case MachineRepresentation::kBit:
+      case MachineRepresentation::kWord8:
+      case MachineRepresentation::kWord16:
+      case MachineRepresentation::kWord32:
+        if (Is32()) {
+          return;
+        }
+        break;
+      case MachineRepresentation::kWord64:
+        if (Is64()) {
+          return;
+        }
+        break;
       default:
         break;
     }
@@ -491,6 +613,7 @@
       str << "TypeError: node #" << node->id() << ":" << *node->op()
           << " uses node #" << input->id() << ":" << *input->op()
           << " which doesn't have a tagged or pointer representation.";
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -507,6 +630,7 @@
         std::ostringstream str;
         str << "TypeError: node #" << input->id() << ":" << *input->op()
             << " is untyped.";
+        PrintDebugHelp(str, node);
         FATAL(str.str().c_str());
         break;
       }
@@ -517,6 +641,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have an int32-compatible representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -531,6 +656,7 @@
         std::ostringstream str;
         str << "TypeError: node #" << input->id() << ":" << *input->op()
             << " is untyped.";
+        PrintDebugHelp(str, node);
         FATAL(str.str().c_str());
         break;
       }
@@ -539,9 +665,11 @@
         break;
     }
     std::ostringstream str;
-    str << "TypeError: node #" << node->id() << ":" << *node->op() << ":"
-        << input_representation << " uses node #" << input->id() << ":"
-        << *input->op() << " which doesn't have a kWord64 representation.";
+    str << "TypeError: node #" << node->id() << ":" << *node->op()
+        << " uses node #" << input->id() << ":" << *input->op() << ":"
+        << input_representation
+        << " which doesn't have a kWord64 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -555,6 +683,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a kFloat32 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -568,6 +697,7 @@
     str << "TypeError: node #" << node->id() << ":" << *node->op()
         << " uses node #" << input->id() << ":" << *input->op()
         << " which doesn't have a kFloat64 representation.";
+    PrintDebugHelp(str, node);
     FATAL(str.str().c_str());
   }
 
@@ -590,11 +720,11 @@
           str << std::endl;
         }
         str << " * input " << i << " (" << input->id() << ":" << *input->op()
-            << ") doesn't have a " << MachineReprToString(expected_input_type)
-            << " representation.";
+            << ") doesn't have a " << expected_input_type << " representation.";
       }
     }
     if (should_log_error) {
+      PrintDebugHelp(str, node);
       FATAL(str.str().c_str());
     }
   }
@@ -640,6 +770,9 @@
       case MachineRepresentation::kFloat32:
       case MachineRepresentation::kFloat64:
       case MachineRepresentation::kSimd128:
+      case MachineRepresentation::kSimd1x4:
+      case MachineRepresentation::kSimd1x8:
+      case MachineRepresentation::kSimd1x16:
       case MachineRepresentation::kBit:
       case MachineRepresentation::kWord8:
       case MachineRepresentation::kWord16:
@@ -657,17 +790,28 @@
     return false;
   }
 
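+  // Appended to every verifier error message; in debug builds the named
+  // node can then be trapped on via --csa-trap-on-node.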
+  void PrintDebugHelp(std::ostream& out, Node const* node) {
+    if (DEBUG_BOOL) {
+      out << "\n#\n# Specify option --csa-trap-on-node=" << name_ << ","
+          << node->id() << " for debugging.";
+    }
+  }
+
   Schedule const* const schedule_;
   MachineRepresentationInferrer const* const inferrer_;
+  bool is_stub_;
+  const char* name_;
 };
 
 }  // namespace
 
 void MachineGraphVerifier::Run(Graph* graph, Schedule const* const schedule,
-                               Linkage* linkage, Zone* temp_zone) {
+                               Linkage* linkage, bool is_stub, const char* name,
+                               Zone* temp_zone) {
   MachineRepresentationInferrer representation_inferrer(schedule, graph,
                                                         linkage, temp_zone);
-  MachineRepresentationChecker checker(schedule, &representation_inferrer);
+  MachineRepresentationChecker checker(schedule, &representation_inferrer,
+                                       is_stub, name);
   checker.Run();
 }
 
diff --git a/src/compiler/machine-graph-verifier.h b/src/compiler/machine-graph-verifier.h
index b7d7b61..26e5d77 100644
--- a/src/compiler/machine-graph-verifier.h
+++ b/src/compiler/machine-graph-verifier.h
@@ -21,7 +21,8 @@
 class MachineGraphVerifier {
  public:
   static void Run(Graph* graph, Schedule const* const schedule,
-                  Linkage* linkage, Zone* temp_zone);
+                  Linkage* linkage, bool is_stub, const char* name,
+                  Zone* temp_zone);
 };
 
 }  // namespace compiler
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 0ad20f0..a50f0dc 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -12,14 +12,15 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
-    : jsgraph_(jsgraph) {}
-
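+// When {allow_signalling_nan} is false, reductions that could propagate a
+// signalling NaN bit pattern unchanged (e.g. x * 1.0 => x) are disabled, and
+// NaN constants are folded through an arithmetic operation to quiet them.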
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph,
+                                               bool allow_signalling_nan)
+    : jsgraph_(jsgraph), allow_signalling_nan_(allow_signalling_nan) {}
 
 MachineOperatorReducer::~MachineOperatorReducer() {}
 
@@ -50,12 +51,12 @@
 Node* MachineOperatorReducer::Float64PowHalf(Node* value) {
   value =
       graph()->NewNode(machine()->Float64Add(), Float64Constant(0.0), value);
-  return graph()->NewNode(
-      common()->Select(MachineRepresentation::kFloat64, BranchHint::kFalse),
-      graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
-                       Float64Constant(-V8_INFINITY)),
-      Float64Constant(V8_INFINITY),
-      graph()->NewNode(machine()->Float64Sqrt(), value));
+  Diamond d(graph(), common(),
+            graph()->NewNode(machine()->Float64LessThanOrEqual(), value,
+                             Float64Constant(-V8_INFINITY)),
+            BranchHint::kFalse);
+  return d.Phi(MachineRepresentation::kFloat64, Float64Constant(V8_INFINITY),
+               graph()->NewNode(machine()->Float64Sqrt(), value));
 }
 
 Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
@@ -316,19 +317,22 @@
     }
     case IrOpcode::kFloat32Sub: {
       Float32BinopMatcher m(node);
-      if (m.right().Is(0) && (copysign(1.0, m.right().Value()) > 0)) {
+      if (allow_signalling_nan_ && m.right().Is(0) &&
+          (copysign(1.0, m.right().Value()) > 0)) {
         return Replace(m.left().node());  // x - 0 => x
       }
       if (m.right().IsNaN()) {  // x - NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat32(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN - x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat32(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // L - R => (L - R)
         return ReplaceFloat32(m.left().Value() - m.right().Value());
       }
-      if (m.left().IsMinusZero()) {
+      if (allow_signalling_nan_ && m.left().IsMinusZero()) {
         // -0.0 - round_down(-0.0 - R) => round_up(R)
         if (machine()->Float32RoundUp().IsSupported() &&
             m.right().IsFloat32RoundDown()) {
@@ -350,7 +354,8 @@
     case IrOpcode::kFloat64Add: {
       Float64BinopMatcher m(node);
       if (m.right().IsNaN()) {  // x + NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.IsFoldable()) {  // K + K => K
         return ReplaceFloat64(m.left().Value() + m.right().Value());
@@ -359,19 +364,22 @@
     }
     case IrOpcode::kFloat64Sub: {
       Float64BinopMatcher m(node);
-      if (m.right().Is(0) && (Double(m.right().Value()).Sign() > 0)) {
+      if (allow_signalling_nan_ && m.right().Is(0) &&
+          (Double(m.right().Value()).Sign() > 0)) {
         return Replace(m.left().node());  // x - 0 => x
       }
       if (m.right().IsNaN()) {  // x - NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN - x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // L - R => (L - R)
         return ReplaceFloat64(m.left().Value() - m.right().Value());
       }
-      if (m.left().IsMinusZero()) {
+      if (allow_signalling_nan_ && m.left().IsMinusZero()) {
         // -0.0 - round_down(-0.0 - R) => round_up(R)
         if (machine()->Float64RoundUp().IsSupported() &&
             m.right().IsFloat64RoundDown()) {
@@ -392,15 +400,17 @@
     }
     case IrOpcode::kFloat64Mul: {
       Float64BinopMatcher m(node);
+      if (allow_signalling_nan_ && m.right().Is(1))
+        return Replace(m.left().node());  // x * 1.0 => x
       if (m.right().Is(-1)) {  // x * -1.0 => -0.0 - x
         node->ReplaceInput(0, Float64Constant(-0.0));
         node->ReplaceInput(1, m.left().node());
         NodeProperties::ChangeOp(node, machine()->Float64Sub());
         return Changed(node);
       }
-      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
       if (m.right().IsNaN()) {                               // x * NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.IsFoldable()) {  // K * K => K
         return ReplaceFloat64(m.left().Value() * m.right().Value());
@@ -414,17 +424,21 @@
     }
     case IrOpcode::kFloat64Div: {
       Float64BinopMatcher m(node);
-      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
+      if (allow_signalling_nan_ && m.right().Is(1))
+        return Replace(m.left().node());  // x / 1.0 => x
+      // TODO(ahaas): We could do x / 1.0 = x if we knew that x is not an sNaN.
       if (m.right().IsNaN()) {                               // x / NaN => NaN
-        return Replace(m.right().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.right().Value() - m.right().Value());
       }
       if (m.left().IsNaN()) {  // NaN / x => NaN
-        return Replace(m.left().node());
+        // Do some calculation to make a signalling NaN quiet.
+        return ReplaceFloat64(m.left().Value() - m.left().Value());
       }
       if (m.IsFoldable()) {  // K / K => K
         return ReplaceFloat64(m.left().Value() / m.right().Value());
       }
-      if (m.right().Is(-1)) {  // x / -1.0 => -x
+      if (allow_signalling_nan_ && m.right().Is(-1)) {  // x / -1.0 => -x
         node->RemoveInput(1);
         NodeProperties::ChangeOp(node, machine()->Float64Neg());
         return Changed(node);
@@ -593,7 +607,13 @@
     }
     case IrOpcode::kChangeFloat32ToFloat64: {
       Float32Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      if (m.HasValue()) {
+        if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+          // Do some calculation to guarantee the value is a quiet NaN.
+          return ReplaceFloat64(m.Value() + m.Value());
+        }
+        return ReplaceFloat64(m.Value());
+      }
       break;
     }
     case IrOpcode::kChangeFloat64ToInt32: {
@@ -642,8 +662,15 @@
     }
     case IrOpcode::kTruncateFloat64ToFloat32: {
       Float64Matcher m(node->InputAt(0));
-      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
-      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+      if (m.HasValue()) {
+        if (!allow_signalling_nan_ && std::isnan(m.Value())) {
+          // Do some calculation to guarantee the value is a quiet NaN.
+          return ReplaceFloat32(DoubleToFloat32(m.Value() + m.Value()));
+        }
+        return ReplaceFloat32(DoubleToFloat32(m.Value()));
+      }
+      if (allow_signalling_nan_ && m.IsChangeFloat32ToFloat64())
+        return Replace(m.node()->InputAt(0));
       break;
     }
     case IrOpcode::kRoundFloat64ToInt32: {
@@ -664,6 +691,8 @@
     case IrOpcode::kFloat64LessThan:
     case IrOpcode::kFloat64LessThanOrEqual:
       return ReduceFloat64Compare(node);
+    case IrOpcode::kFloat64RoundDown:
+      return ReduceFloat64RoundDown(node);
     default:
       break;
   }
@@ -841,14 +870,13 @@
     if (base::bits::IsPowerOfTwo32(divisor)) {
       uint32_t const mask = divisor - 1;
       Node* const zero = Int32Constant(0);
-      node->ReplaceInput(
-          0, graph()->NewNode(machine()->Int32LessThan(), dividend, zero));
-      node->ReplaceInput(
-          1, Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)));
-      node->ReplaceInput(2, Word32And(dividend, mask));
-      NodeProperties::ChangeOp(
-          node,
-          common()->Select(MachineRepresentation::kWord32, BranchHint::kFalse));
+      Diamond d(graph(), common(),
+                graph()->NewNode(machine()->Int32LessThan(), dividend, zero),
+                BranchHint::kFalse);
+      return Replace(
+          d.Phi(MachineRepresentation::kWord32,
+                Int32Sub(zero, Word32And(Int32Sub(zero, dividend), mask)),
+                Word32And(dividend, mask)));
     } else {
       Node* quotient = Int32Div(dividend, divisor);
       DCHECK_EQ(dividend, node->InputAt(0));
@@ -1153,8 +1181,9 @@
     if (m.left().IsWord32Shl()) {
       Uint32BinopMatcher mleft(m.left().node());
       if (mleft.right().HasValue() &&
-          mleft.right().Value() >= base::bits::CountTrailingZeros32(mask)) {
-        // (x << L) & (-1 << K) => x << L iff K >= L
+          (mleft.right().Value() & 0x1f) >=
+              base::bits::CountTrailingZeros32(mask)) {
+        // (x << L) & (-1 << K) => x << L iff L >= K
         return Replace(mleft.node());
       }
     } else if (m.left().IsInt32Add()) {
@@ -1392,6 +1421,14 @@
   return NoChange();
 }
 
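+// Constant-folds Float64RoundDown(K) to floor(K).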
+Reduction MachineOperatorReducer::ReduceFloat64RoundDown(Node* node) {
+  DCHECK_EQ(IrOpcode::kFloat64RoundDown, node->opcode());
+  Float64Matcher m(node->InputAt(0));
+  if (m.HasValue()) {
+    return ReplaceFloat64(Floor(m.Value()));
+  }
+  return NoChange();
+}
 
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return jsgraph()->common();
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index d0845d9..593f7f2 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -24,7 +24,8 @@
 class V8_EXPORT_PRIVATE MachineOperatorReducer final
     : public NON_EXPORTED_BASE(Reducer) {
  public:
-  explicit MachineOperatorReducer(JSGraph* jsgraph);
+  explicit MachineOperatorReducer(JSGraph* jsgraph,
+                                  bool allow_signalling_nan = true);
   ~MachineOperatorReducer();
 
   Reduction Reduce(Node* node) override;
@@ -96,6 +97,7 @@
   Reduction ReduceFloat64InsertLowWord32(Node* node);
   Reduction ReduceFloat64InsertHighWord32(Node* node);
   Reduction ReduceFloat64Compare(Node* node);
+  Reduction ReduceFloat64RoundDown(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -103,6 +105,7 @@
   MachineOperatorBuilder* machine() const;
 
   JSGraph* jsgraph_;
+  bool allow_signalling_nan_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index e36a61e..854c22e 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -43,7 +43,8 @@
 
 
 StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
-  DCHECK_EQ(IrOpcode::kStore, op->opcode());
+  DCHECK(IrOpcode::kStore == op->opcode() ||
+         IrOpcode::kProtectedStore == op->opcode());
   return OpParameter<StoreRepresentation>(op);
 }
 
@@ -69,9 +70,9 @@
   return OpParameter<CheckedStoreRepresentation>(op);
 }
 
-MachineRepresentation StackSlotRepresentationOf(Operator const* op) {
+int StackSlotSizeOf(Operator const* op) {
   DCHECK_EQ(IrOpcode::kStackSlot, op->opcode());
-  return OpParameter<MachineRepresentation>(op);
+  return OpParameter<int>(op);
 }
 
 MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
@@ -129,7 +130,6 @@
   V(Word32Clz, Operator::kNoProperties, 1, 0, 1)                           \
   V(Word64Clz, Operator::kNoProperties, 1, 0, 1)                           \
   V(BitcastTaggedToWord, Operator::kNoProperties, 1, 0, 1)                 \
-  V(BitcastWordToTagged, Operator::kNoProperties, 1, 0, 1)                 \
   V(BitcastWordToTaggedSigned, Operator::kNoProperties, 1, 0, 1)           \
   V(TruncateFloat64ToWord32, Operator::kNoProperties, 1, 0, 1)             \
   V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 0, 1)              \
@@ -220,8 +220,6 @@
   V(Word32PairShr, Operator::kNoProperties, 3, 0, 2)                       \
   V(Word32PairSar, Operator::kNoProperties, 3, 0, 2)                       \
   V(CreateFloat32x4, Operator::kNoProperties, 4, 0, 1)                     \
-  V(Float32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                \
-  V(Float32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                \
   V(Float32x4Abs, Operator::kNoProperties, 1, 0, 1)                        \
   V(Float32x4Neg, Operator::kNoProperties, 1, 0, 1)                        \
   V(Float32x4Sqrt, Operator::kNoProperties, 1, 0, 1)                       \
@@ -241,57 +239,36 @@
   V(Float32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)            \
   V(Float32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                \
   V(Float32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)         \
-  V(Float32x4Select, Operator::kNoProperties, 3, 0, 1)                     \
-  V(Float32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                    \
-  V(Float32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                    \
   V(Float32x4FromInt32x4, Operator::kNoProperties, 1, 0, 1)                \
   V(Float32x4FromUint32x4, Operator::kNoProperties, 1, 0, 1)               \
   V(CreateInt32x4, Operator::kNoProperties, 4, 0, 1)                       \
-  V(Int32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int32x4Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int32x4Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Sub, Operator::kNoProperties, 2, 0, 1)                          \
   V(Int32x4Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int32x4Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int32x4Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int32x4NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int32x4LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int32x4Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                      \
-  V(Int32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                      \
   V(Int32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)                \
   V(Uint32x4Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint32x4Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint32x4ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint32x4ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint32x4LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint32x4LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint32x4GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint32x4GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint32x4FromFloat32x4, Operator::kNoProperties, 1, 0, 1)               \
-  V(CreateBool32x4, Operator::kNoProperties, 4, 0, 1)                      \
-  V(Bool32x4ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool32x4ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool32x4And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool32x4Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool32x4Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool32x4Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool32x4AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool32x4AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool32x4Swizzle, Operator::kNoProperties, 5, 0, 1)                     \
-  V(Bool32x4Shuffle, Operator::kNoProperties, 6, 0, 1)                     \
-  V(Bool32x4Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool32x4NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(CreateInt16x8, Operator::kNoProperties, 8, 0, 1)                       \
-  V(Int16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int16x8Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int16x8Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
@@ -300,43 +277,27 @@
   V(Int16x8Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int16x8Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int16x8Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int16x8NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int16x8LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int16x8Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                      \
-  V(Int16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                     \
   V(Uint16x8AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
   V(Uint16x8SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint16x8Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint16x8Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint16x8ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint16x8ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint16x8LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint16x8LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint16x8GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint16x8GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
-  V(CreateBool16x8, Operator::kNoProperties, 8, 0, 1)                      \
-  V(Bool16x8ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool16x8ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool16x8And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool16x8Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool16x8Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool16x8Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool16x8AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool16x8AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool16x8Swizzle, Operator::kNoProperties, 9, 0, 1)                     \
-  V(Bool16x8Shuffle, Operator::kNoProperties, 10, 0, 1)                    \
-  V(Bool16x8Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool16x8NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(CreateInt8x16, Operator::kNoProperties, 16, 0, 1)                      \
-  V(Int8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                  \
-  V(Int8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                  \
   V(Int8x16Neg, Operator::kNoProperties, 1, 0, 1)                          \
   V(Int8x16Add, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                   \
@@ -345,40 +306,26 @@
   V(Int8x16Mul, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16Min, Operator::kCommutative, 2, 0, 1)                           \
   V(Int8x16Max, Operator::kCommutative, 2, 0, 1)                           \
-  V(Int8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)            \
-  V(Int8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)           \
   V(Int8x16Equal, Operator::kCommutative, 2, 0, 1)                         \
   V(Int8x16NotEqual, Operator::kCommutative, 2, 0, 1)                      \
   V(Int8x16LessThan, Operator::kNoProperties, 2, 0, 1)                     \
   V(Int8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)              \
   V(Int8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                  \
   V(Int8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)           \
-  V(Int8x16Select, Operator::kNoProperties, 3, 0, 1)                       \
-  V(Int8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                     \
-  V(Int8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                     \
   V(Uint8x16AddSaturate, Operator::kCommutative, 2, 0, 1)                  \
   V(Uint8x16SubSaturate, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint8x16Min, Operator::kCommutative, 2, 0, 1)                          \
   V(Uint8x16Max, Operator::kCommutative, 2, 0, 1)                          \
-  V(Uint8x16ShiftLeftByScalar, Operator::kNoProperties, 2, 0, 1)           \
-  V(Uint8x16ShiftRightByScalar, Operator::kNoProperties, 2, 0, 1)          \
   V(Uint8x16LessThan, Operator::kNoProperties, 2, 0, 1)                    \
   V(Uint8x16LessThanOrEqual, Operator::kNoProperties, 2, 0, 1)             \
   V(Uint8x16GreaterThan, Operator::kNoProperties, 2, 0, 1)                 \
   V(Uint8x16GreaterThanOrEqual, Operator::kNoProperties, 2, 0, 1)          \
-  V(CreateBool8x16, Operator::kNoProperties, 16, 0, 1)                     \
-  V(Bool8x16ExtractLane, Operator::kNoProperties, 2, 0, 1)                 \
-  V(Bool8x16ReplaceLane, Operator::kNoProperties, 3, 0, 1)                 \
   V(Bool8x16And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool8x16Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Bool8x16Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1) \
   V(Bool8x16Not, Operator::kNoProperties, 1, 0, 1)                         \
   V(Bool8x16AnyTrue, Operator::kNoProperties, 1, 0, 1)                     \
   V(Bool8x16AllTrue, Operator::kNoProperties, 1, 0, 1)                     \
-  V(Bool8x16Swizzle, Operator::kNoProperties, 17, 0, 1)                    \
-  V(Bool8x16Shuffle, Operator::kNoProperties, 18, 0, 1)                    \
-  V(Bool8x16Equal, Operator::kCommutative, 2, 0, 1)                        \
-  V(Bool8x16NotEqual, Operator::kCommutative, 2, 0, 1)                     \
   V(Simd128Load, Operator::kNoProperties, 2, 0, 1)                         \
   V(Simd128Load1, Operator::kNoProperties, 2, 0, 1)                        \
   V(Simd128Load2, Operator::kNoProperties, 2, 0, 1)                        \
@@ -390,7 +337,10 @@
   V(Simd128And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
   V(Simd128Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)   \
   V(Simd128Xor, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)  \
-  V(Simd128Not, Operator::kNoProperties, 1, 0, 1)
+  V(Simd128Not, Operator::kNoProperties, 1, 0, 1)                          \
+  V(Simd32x4Select, Operator::kNoProperties, 3, 0, 1)                      \
+  V(Simd16x8Select, Operator::kNoProperties, 3, 0, 1)                      \
+  V(Simd8x16Select, Operator::kNoProperties, 3, 0, 1)
 
 #define PURE_OPTIONAL_OP_LIST(V)                            \
   V(Word32Ctz, Operator::kNoProperties, 1, 0, 1)            \
@@ -460,6 +410,26 @@
   V(kWord16)                          \
   V(kWord32)
 
+#define SIMD_LANE_OP_LIST(V) \
+  V(Float32x4, 4)            \
+  V(Int32x4, 4)              \
+  V(Int16x8, 8)              \
+  V(Int8x16, 16)
+
+#define SIMD_FORMAT_LIST(V) \
+  V(32x4, 32)               \
+  V(16x8, 16)               \
+  V(8x16, 8)
+
+#define STACK_SLOT_CACHED_SIZES_LIST(V) V(4) V(8) V(16)
+
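+// Stack slot operators are parameterized by the slot's size in bytes
+// (see StackSlotSizeOf).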
+struct StackSlotOperator : public Operator1<int> {
+  explicit StackSlotOperator(int size)
+      : Operator1<int>(IrOpcode::kStackSlot,
+                       Operator::kNoDeopt | Operator::kNoThrow, "StackSlot", 0,
+                       0, 0, 1, 0, 0, size) {}
+};
+
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count,         \
              output_count)                                                     \
@@ -485,56 +455,51 @@
   OVERFLOW_OP_LIST(OVERFLOW_OP)
 #undef OVERFLOW_OP
 
-#define LOAD(Type)                                                           \
-  struct Load##Type##Operator final : public Operator1<LoadRepresentation> { \
-    Load##Type##Operator()                                                   \
-        : Operator1<LoadRepresentation>(                                     \
-              IrOpcode::kLoad,                                               \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}              \
-  };                                                                         \
-  struct UnalignedLoad##Type##Operator final                                 \
-      : public Operator1<UnalignedLoadRepresentation> {                      \
-    UnalignedLoad##Type##Operator()                                          \
-        : Operator1<UnalignedLoadRepresentation>(                            \
-              IrOpcode::kUnalignedLoad,                                      \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
-  };                                                                         \
-  struct CheckedLoad##Type##Operator final                                   \
-      : public Operator1<CheckedLoadRepresentation> {                        \
-    CheckedLoad##Type##Operator()                                            \
-        : Operator1<CheckedLoadRepresentation>(                              \
-              IrOpcode::kCheckedLoad,                                        \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}       \
-  };                                                                         \
-  struct ProtectedLoad##Type##Operator final                                 \
-      : public Operator1<ProtectedLoadRepresentation> {                      \
-    ProtectedLoad##Type##Operator()                                          \
-        : Operator1<ProtectedLoadRepresentation>(                            \
-              IrOpcode::kProtectedLoad,                                      \
-              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,  \
-              "ProtectedLoad", 4, 1, 1, 1, 1, 0, MachineType::Type()) {}     \
-  };                                                                         \
-  Load##Type##Operator kLoad##Type;                                          \
-  UnalignedLoad##Type##Operator kUnalignedLoad##Type;                        \
-  CheckedLoad##Type##Operator kCheckedLoad##Type;                            \
+#define LOAD(Type)                                                            \
+  struct Load##Type##Operator final : public Operator1<LoadRepresentation> {  \
+    Load##Type##Operator()                                                    \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kLoad,                                                \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "Load", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}               \
+  };                                                                          \
+  struct UnalignedLoad##Type##Operator final                                  \
+      : public Operator1<UnalignedLoadRepresentation> {                       \
+    UnalignedLoad##Type##Operator()                                           \
+        : Operator1<UnalignedLoadRepresentation>(                             \
+              IrOpcode::kUnalignedLoad,                                       \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "UnalignedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {}      \
+  };                                                                          \
+  struct CheckedLoad##Type##Operator final                                    \
+      : public Operator1<CheckedLoadRepresentation> {                         \
+    CheckedLoad##Type##Operator()                                             \
+        : Operator1<CheckedLoadRepresentation>(                               \
+              IrOpcode::kCheckedLoad,                                         \
+              Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,   \
+              "CheckedLoad", 3, 1, 1, 1, 1, 0, MachineType::Type()) {}        \
+  };                                                                          \
+  struct ProtectedLoad##Type##Operator final                                  \
+      : public Operator1<LoadRepresentation> {                                \
+    ProtectedLoad##Type##Operator()                                           \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kProtectedLoad,                                       \
+              Operator::kNoDeopt | Operator::kNoThrow, "ProtectedLoad", 3, 1, \
+              1, 1, 1, 0, MachineType::Type()) {}                             \
+  };                                                                          \
+  Load##Type##Operator kLoad##Type;                                           \
+  UnalignedLoad##Type##Operator kUnalignedLoad##Type;                         \
+  CheckedLoad##Type##Operator kCheckedLoad##Type;                             \
   ProtectedLoad##Type##Operator kProtectedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
-#define STACKSLOT(Type)                                                      \
-  struct StackSlot##Type##Operator final                                     \
-      : public Operator1<MachineRepresentation> {                            \
-    StackSlot##Type##Operator()                                              \
-        : Operator1<MachineRepresentation>(                                  \
-              IrOpcode::kStackSlot, Operator::kNoDeopt | Operator::kNoThrow, \
-              "StackSlot", 0, 0, 0, 1, 0, 0,                                 \
-              MachineType::Type().representation()) {}                       \
-  };                                                                         \
-  StackSlot##Type##Operator kStackSlot##Type;
-  MACHINE_TYPE_LIST(STACKSLOT)
+#define STACKSLOT(Size)                                                     \
+  struct StackSlotOfSize##Size##Operator final : public StackSlotOperator { \
+    StackSlotOfSize##Size##Operator() : StackSlotOperator(Size) {}          \
+  };                                                                        \
+  StackSlotOfSize##Size##Operator kStackSlotSize##Size;
+  STACK_SLOT_CACHED_SIZES_LIST(STACKSLOT)
 #undef STACKSLOT
 
 #define STORE(Type)                                                            \
@@ -585,13 +550,24 @@
               "CheckedStore", 4, 1, 1, 0, 1, 0, MachineRepresentation::Type) { \
     }                                                                          \
   };                                                                           \
+  struct ProtectedStore##Type##Operator                                        \
+      : public Operator1<StoreRepresentation> {                                \
+    explicit ProtectedStore##Type##Operator()                                  \
+        : Operator1<StoreRepresentation>(                                      \
+              IrOpcode::kProtectedStore,                                       \
+              Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow,     \
+              "ProtectedStore", 4, 1, 1, 0, 1, 0,                              \
+              StoreRepresentation(MachineRepresentation::Type,                 \
+                                  kNoWriteBarrier)) {}                         \
+  };                                                                           \
   Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier;          \
   Store##Type##MapWriteBarrier##Operator kStore##Type##MapWriteBarrier;        \
   Store##Type##PointerWriteBarrier##Operator                                   \
       kStore##Type##PointerWriteBarrier;                                       \
   Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier;      \
   UnalignedStore##Type##Operator kUnalignedStore##Type;                        \
-  CheckedStore##Type##Operator kCheckedStore##Type;
+  CheckedStore##Type##Operator kCheckedStore##Type;                            \
+  ProtectedStore##Type##Operator kProtectedStore##Type;
   MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
 
@@ -621,6 +597,19 @@
   ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
 #undef STORE
 
+  // The {BitcastWordToTagged} operator must not be marked as pure (especially
+  // not idempotent), because otherwise the splitting logic in the Scheduler
+  // might decide to split these operators, thus potentially creating live
+  // ranges of allocation top across calls or other things that might allocate.
+  // See https://bugs.chromium.org/p/v8/issues/detail?id=6059 for more details.
+  struct BitcastWordToTaggedOperator : public Operator {
+    BitcastWordToTaggedOperator()
+        : Operator(IrOpcode::kBitcastWordToTagged,
+                   Operator::kEliminatable | Operator::kNoWrite,
+                   "BitcastWordToTagged", 1, 0, 0, 1, 0, 0) {}
+  };
+  BitcastWordToTaggedOperator kBitcastWordToTagged;
+
   struct DebugBreakOperator : public Operator {
     DebugBreakOperator()
         : Operator(IrOpcode::kDebugBreak, Operator::kNoThrow, "DebugBreak", 0,
@@ -678,6 +667,9 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -726,15 +718,21 @@
   return nullptr;
 }
 
-const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
-#define STACKSLOT(Type)                              \
-  if (rep == MachineType::Type().representation()) { \
-    return &cache_.kStackSlot##Type;                 \
+const Operator* MachineOperatorBuilder::StackSlot(int size) {
+  DCHECK_LE(0, size);
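+  // The common sizes listed in STACK_SLOT_CACHED_SIZES_LIST are served from
+  // the global cache; any other size gets a fresh zone-allocated operator.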
+#define CASE_CACHED_SIZE(Size) \
+  case Size:                   \
+    return &cache_.kStackSlotSize##Size;
+  switch (size) {
+    STACK_SLOT_CACHED_SIZES_LIST(CASE_CACHED_SIZE);
+    default:
+      return new (zone_) StackSlotOperator(size);
   }
-  MACHINE_TYPE_LIST(STACKSLOT)
-#undef STACKSLOT
-  UNREACHABLE();
-  return nullptr;
+#undef CASE_CACHED_SIZE
+}
+
+const Operator* MachineOperatorBuilder::StackSlot(MachineRepresentation rep) {
+  return StackSlot(1 << ElementSizeLog2Of(rep));
 }
 
 const Operator* MachineOperatorBuilder::Store(StoreRepresentation store_rep) {
@@ -755,6 +753,29 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
+    case MachineRepresentation::kNone:
+      break;
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+const Operator* MachineOperatorBuilder::ProtectedStore(
+    MachineRepresentation rep) {
+  switch (rep) {
+#define STORE(kRep)                       \
+  case MachineRepresentation::kRep:       \
+    return &cache_.kProtectedStore##kRep; \
+    break;
+    MACHINE_REPRESENTATION_LIST(STORE)
+#undef STORE
+    case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -766,6 +787,10 @@
   return &cache_.kUnsafePointerAdd;
 }
 
+const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
+  return &cache_.kBitcastWordToTagged;
+}
+
 const Operator* MachineOperatorBuilder::DebugBreak() {
   return &cache_.kDebugBreak;
 }
@@ -796,6 +821,9 @@
     MACHINE_REPRESENTATION_LIST(STORE)
 #undef STORE
     case MachineRepresentation::kBit:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -825,6 +853,60 @@
   return nullptr;
 }
 
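+// Lane indices are immediates on the operator and are range-checked against
+// the lane count of the SIMD format.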
+#define SIMD_LANE_OPS(Type, lane_count)                                     \
+  const Operator* MachineOperatorBuilder::Type##ExtractLane(                \
+      int32_t lane_index) {                                                 \
+    DCHECK(0 <= lane_index && lane_index < lane_count);                     \
+    return new (zone_)                                                      \
+        Operator1<int32_t>(IrOpcode::k##Type##ExtractLane, Operator::kPure, \
+                           "Extract lane", 1, 0, 0, 1, 0, 0, lane_index);   \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Type##ReplaceLane(                \
+      int32_t lane_index) {                                                 \
+    DCHECK(0 <= lane_index && lane_index < lane_count);                     \
+    return new (zone_)                                                      \
+        Operator1<int32_t>(IrOpcode::k##Type##ReplaceLane, Operator::kPure, \
+                           "Replace lane", 2, 0, 0, 1, 0, 0, lane_index);   \
+  }
+SIMD_LANE_OP_LIST(SIMD_LANE_OPS)
+#undef SIMD_LANE_OPS
+
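+// Shift amounts are likewise immediates on the operator rather than value
+// inputs, range-checked against the lane width in bits.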
+#define SIMD_SHIFT_OPS(format, bits)                                        \
+  const Operator* MachineOperatorBuilder::Int##format##ShiftLeftByScalar(   \
+      int32_t shift) {                                                      \
+    DCHECK(0 <= shift && shift < bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kInt##format##ShiftLeftByScalar, Operator::kPure,         \
+        "Shift left", 1, 0, 0, 1, 0, 0, shift);                             \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Int##format##ShiftRightByScalar(  \
+      int32_t shift) {                                                      \
+    DCHECK(0 < shift && shift <= bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kInt##format##ShiftRightByScalar, Operator::kPure,        \
+        "Arithmetic shift right", 1, 0, 0, 1, 0, 0, shift);                 \
+  }                                                                         \
+  const Operator* MachineOperatorBuilder::Uint##format##ShiftRightByScalar( \
+      int32_t shift) {                                                      \
+    DCHECK(0 <= shift && shift < bits);                                     \
+    return new (zone_) Operator1<int32_t>(                                  \
+        IrOpcode::kUint##format##ShiftRightByScalar, Operator::kPure,       \
+        "Shift right", 1, 0, 0, 1, 0, 0, shift);                            \
+  }
+SIMD_FORMAT_LIST(SIMD_SHIFT_OPS)
+#undef SIMD_SHIFT_OPS
+
+// TODO(bbudge) Add Shuffle, DCHECKs based on format.
+#define SIMD_PERMUTE_OPS(format, bits)                                         \
+  const Operator* MachineOperatorBuilder::Simd##format##Swizzle(               \
+      uint32_t swizzle) {                                                      \
+    return new (zone_)                                                         \
+        Operator1<uint32_t>(IrOpcode::kSimd##format##Swizzle, Operator::kPure, \
+                            "Swizzle", 2, 0, 0, 1, 0, 0, swizzle);             \
+  }
+SIMD_FORMAT_LIST(SIMD_PERMUTE_OPS)
+#undef SIMD_PERMUTE_OPS
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
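
Aside (illustrative sketch, not part of the patch): lane indices and shift
amounts are now encoded on the operator itself via Operator1<int32_t>, so a
caller holding a JSGraph* jsgraph and a vector-valued Node* vec (both
hypothetical names) would build and inspect such a node roughly like this:

    MachineOperatorBuilder* m = jsgraph->machine();
    Node* lane2 = jsgraph->graph()->NewNode(m->Int32x4ExtractLane(2), vec);
    int32_t lane = OpParameter<int32_t>(lane2->op());  // recovers 2
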
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 1cbec99..0558279 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -43,7 +43,6 @@
 
 // A Load needs a MachineType.
 typedef MachineType LoadRepresentation;
-typedef LoadRepresentation ProtectedLoadRepresentation;
 
 LoadRepresentation LoadRepresentationOf(Operator const*);
 
@@ -94,7 +93,7 @@
 
 CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
 
-MachineRepresentation StackSlotRepresentationOf(Operator const* op);
+int StackSlotSizeOf(Operator const* op);
 
 MachineRepresentation AtomicStoreRepresentationOf(Operator const* op);
 
@@ -427,8 +426,8 @@
 
   // SIMD operators.
   const Operator* CreateFloat32x4();
-  const Operator* Float32x4ExtractLane();
-  const Operator* Float32x4ReplaceLane();
+  const Operator* Float32x4ExtractLane(int32_t);
+  const Operator* Float32x4ReplaceLane(int32_t);
   const Operator* Float32x4Abs();
   const Operator* Float32x4Neg();
   const Operator* Float32x4Sqrt();
@@ -448,61 +447,47 @@
   const Operator* Float32x4LessThanOrEqual();
   const Operator* Float32x4GreaterThan();
   const Operator* Float32x4GreaterThanOrEqual();
-  const Operator* Float32x4Select();
-  const Operator* Float32x4Swizzle();
-  const Operator* Float32x4Shuffle();
   const Operator* Float32x4FromInt32x4();
   const Operator* Float32x4FromUint32x4();
 
   const Operator* CreateInt32x4();
-  const Operator* Int32x4ExtractLane();
-  const Operator* Int32x4ReplaceLane();
+  const Operator* Int32x4ExtractLane(int32_t);
+  const Operator* Int32x4ReplaceLane(int32_t);
   const Operator* Int32x4Neg();
   const Operator* Int32x4Add();
   const Operator* Int32x4Sub();
   const Operator* Int32x4Mul();
   const Operator* Int32x4Min();
   const Operator* Int32x4Max();
-  const Operator* Int32x4ShiftLeftByScalar();
-  const Operator* Int32x4ShiftRightByScalar();
+  const Operator* Int32x4ShiftLeftByScalar(int32_t);
+  const Operator* Int32x4ShiftRightByScalar(int32_t);
   const Operator* Int32x4Equal();
   const Operator* Int32x4NotEqual();
   const Operator* Int32x4LessThan();
   const Operator* Int32x4LessThanOrEqual();
   const Operator* Int32x4GreaterThan();
   const Operator* Int32x4GreaterThanOrEqual();
-  const Operator* Int32x4Select();
-  const Operator* Int32x4Swizzle();
-  const Operator* Int32x4Shuffle();
   const Operator* Int32x4FromFloat32x4();
 
   const Operator* Uint32x4Min();
   const Operator* Uint32x4Max();
-  const Operator* Uint32x4ShiftLeftByScalar();
-  const Operator* Uint32x4ShiftRightByScalar();
+  const Operator* Uint32x4ShiftRightByScalar(int32_t);
   const Operator* Uint32x4LessThan();
   const Operator* Uint32x4LessThanOrEqual();
   const Operator* Uint32x4GreaterThan();
   const Operator* Uint32x4GreaterThanOrEqual();
   const Operator* Uint32x4FromFloat32x4();
 
-  const Operator* CreateBool32x4();
-  const Operator* Bool32x4ExtractLane();
-  const Operator* Bool32x4ReplaceLane();
   const Operator* Bool32x4And();
   const Operator* Bool32x4Or();
   const Operator* Bool32x4Xor();
   const Operator* Bool32x4Not();
   const Operator* Bool32x4AnyTrue();
   const Operator* Bool32x4AllTrue();
-  const Operator* Bool32x4Swizzle();
-  const Operator* Bool32x4Shuffle();
-  const Operator* Bool32x4Equal();
-  const Operator* Bool32x4NotEqual();
 
   const Operator* CreateInt16x8();
-  const Operator* Int16x8ExtractLane();
-  const Operator* Int16x8ReplaceLane();
+  const Operator* Int16x8ExtractLane(int32_t);
+  const Operator* Int16x8ReplaceLane(int32_t);
   const Operator* Int16x8Neg();
   const Operator* Int16x8Add();
   const Operator* Int16x8AddSaturate();
@@ -511,46 +496,35 @@
   const Operator* Int16x8Mul();
   const Operator* Int16x8Min();
   const Operator* Int16x8Max();
-  const Operator* Int16x8ShiftLeftByScalar();
-  const Operator* Int16x8ShiftRightByScalar();
+  const Operator* Int16x8ShiftLeftByScalar(int32_t);
+  const Operator* Int16x8ShiftRightByScalar(int32_t);
   const Operator* Int16x8Equal();
   const Operator* Int16x8NotEqual();
   const Operator* Int16x8LessThan();
   const Operator* Int16x8LessThanOrEqual();
   const Operator* Int16x8GreaterThan();
   const Operator* Int16x8GreaterThanOrEqual();
-  const Operator* Int16x8Select();
-  const Operator* Int16x8Swizzle();
-  const Operator* Int16x8Shuffle();
 
   const Operator* Uint16x8AddSaturate();
   const Operator* Uint16x8SubSaturate();
   const Operator* Uint16x8Min();
   const Operator* Uint16x8Max();
-  const Operator* Uint16x8ShiftLeftByScalar();
-  const Operator* Uint16x8ShiftRightByScalar();
+  const Operator* Uint16x8ShiftRightByScalar(int32_t);
   const Operator* Uint16x8LessThan();
   const Operator* Uint16x8LessThanOrEqual();
   const Operator* Uint16x8GreaterThan();
   const Operator* Uint16x8GreaterThanOrEqual();
 
-  const Operator* CreateBool16x8();
-  const Operator* Bool16x8ExtractLane();
-  const Operator* Bool16x8ReplaceLane();
   const Operator* Bool16x8And();
   const Operator* Bool16x8Or();
   const Operator* Bool16x8Xor();
   const Operator* Bool16x8Not();
   const Operator* Bool16x8AnyTrue();
   const Operator* Bool16x8AllTrue();
-  const Operator* Bool16x8Swizzle();
-  const Operator* Bool16x8Shuffle();
-  const Operator* Bool16x8Equal();
-  const Operator* Bool16x8NotEqual();
 
   const Operator* CreateInt8x16();
-  const Operator* Int8x16ExtractLane();
-  const Operator* Int8x16ReplaceLane();
+  const Operator* Int8x16ExtractLane(int32_t);
+  const Operator* Int8x16ReplaceLane(int32_t);
   const Operator* Int8x16Neg();
   const Operator* Int8x16Add();
   const Operator* Int8x16AddSaturate();
@@ -559,42 +533,31 @@
   const Operator* Int8x16Mul();
   const Operator* Int8x16Min();
   const Operator* Int8x16Max();
-  const Operator* Int8x16ShiftLeftByScalar();
-  const Operator* Int8x16ShiftRightByScalar();
+  const Operator* Int8x16ShiftLeftByScalar(int32_t);
+  const Operator* Int8x16ShiftRightByScalar(int32_t);
   const Operator* Int8x16Equal();
   const Operator* Int8x16NotEqual();
   const Operator* Int8x16LessThan();
   const Operator* Int8x16LessThanOrEqual();
   const Operator* Int8x16GreaterThan();
   const Operator* Int8x16GreaterThanOrEqual();
-  const Operator* Int8x16Select();
-  const Operator* Int8x16Swizzle();
-  const Operator* Int8x16Shuffle();
 
   const Operator* Uint8x16AddSaturate();
   const Operator* Uint8x16SubSaturate();
   const Operator* Uint8x16Min();
   const Operator* Uint8x16Max();
-  const Operator* Uint8x16ShiftLeftByScalar();
-  const Operator* Uint8x16ShiftRightByScalar();
+  const Operator* Uint8x16ShiftRightByScalar(int32_t);
   const Operator* Uint8x16LessThan();
   const Operator* Uint8x16LessThanOrEqual();
   const Operator* Uint8x16GreaterThan();
   const Operator* Uint8x16GreaterThanOrEqual();
 
-  const Operator* CreateBool8x16();
-  const Operator* Bool8x16ExtractLane();
-  const Operator* Bool8x16ReplaceLane();
   const Operator* Bool8x16And();
   const Operator* Bool8x16Or();
   const Operator* Bool8x16Xor();
   const Operator* Bool8x16Not();
   const Operator* Bool8x16AnyTrue();
   const Operator* Bool8x16AllTrue();
-  const Operator* Bool8x16Swizzle();
-  const Operator* Bool8x16Shuffle();
-  const Operator* Bool8x16Equal();
-  const Operator* Bool8x16NotEqual();
 
   const Operator* Simd128Load();
   const Operator* Simd128Load1();
@@ -608,6 +571,15 @@
   const Operator* Simd128Or();
   const Operator* Simd128Xor();
   const Operator* Simd128Not();
+  const Operator* Simd32x4Select();
+  const Operator* Simd32x4Swizzle(uint32_t);
+  const Operator* Simd32x4Shuffle();
+  const Operator* Simd16x8Select();
+  const Operator* Simd16x8Swizzle(uint32_t);
+  const Operator* Simd16x8Shuffle();
+  const Operator* Simd8x16Select();
+  const Operator* Simd8x16Swizzle(uint32_t);
+  const Operator* Simd8x16Shuffle();
 
   // load [base + index]
   const Operator* Load(LoadRepresentation rep);
@@ -615,6 +587,7 @@
 
   // store [base + index], value
   const Operator* Store(StoreRepresentation rep);
+  const Operator* ProtectedStore(MachineRepresentation rep);
 
   // unaligned load [base + index]
   const Operator* UnalignedLoad(UnalignedLoadRepresentation rep);
@@ -622,6 +595,7 @@
   // unaligned store [base + index], value
   const Operator* UnalignedStore(UnalignedStoreRepresentation rep);
 
+  const Operator* StackSlot(int size);
   const Operator* StackSlot(MachineRepresentation rep);
 
   // Access to the machine stack.
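
Aside (hypothetical usage, not part of the patch): the new size-based
StackSlot overload pairs with StackSlotSizeOf above; a caller that only
knows a raw byte size (names here are illustrative) could write:

    Node* slot = graph->NewNode(machine->StackSlot(16));  // 16-byte slot
    DCHECK_EQ(16, StackSlotSizeOf(slot->op()));
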
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc
index 66fcbb9..7e9a522 100644
--- a/src/compiler/memory-optimizer.cc
+++ b/src/compiler/memory-optimizer.cc
@@ -20,7 +20,8 @@
       empty_state_(AllocationState::Empty(zone)),
       pending_(zone),
       tokens_(zone),
-      zone_(zone) {}
+      zone_(zone),
+      graph_assembler_(jsgraph, nullptr, nullptr, zone) {}
 
 void MemoryOptimizer::Optimize() {
   EnqueueUses(graph()->start(), empty_state());
@@ -91,7 +92,9 @@
     case IrOpcode::kDeoptimizeUnless:
     case IrOpcode::kIfException:
     case IrOpcode::kLoad:
+    case IrOpcode::kProtectedLoad:
     case IrOpcode::kStore:
+    case IrOpcode::kProtectedStore:
     case IrOpcode::kRetain:
     case IrOpcode::kUnsafePointerAdd:
       return VisitOtherEffect(node, state);
@@ -101,12 +104,17 @@
   DCHECK_EQ(0, node->op()->EffectOutputCount());
 }
 
+#define __ gasm()->
+
 void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kAllocate, node->opcode());
   Node* value;
   Node* size = node->InputAt(0);
   Node* effect = node->InputAt(1);
   Node* control = node->InputAt(2);
+
+  gasm()->Reset(effect, control);
+
   PretenureFlag pretenure = PretenureFlagOf(node->op());
 
   // Propagate tenuring from outer allocations to inner allocations, i.e.
@@ -141,11 +149,11 @@
   }
 
   // Determine the top/limit addresses.
-  Node* top_address = jsgraph()->ExternalConstant(
+  Node* top_address = __ ExternalConstant(
       pretenure == NOT_TENURED
           ? ExternalReference::new_space_allocation_top_address(isolate())
           : ExternalReference::old_space_allocation_top_address(isolate()));
-  Node* limit_address = jsgraph()->ExternalConstant(
+  Node* limit_address = __ ExternalConstant(
       pretenure == NOT_TENURED
           ? ExternalReference::new_space_allocation_limit_address(isolate())
           : ExternalReference::old_space_allocation_limit_address(isolate()));
@@ -171,89 +179,69 @@
 
       // Update the allocation top with the new object allocation.
       // TODO(bmeurer): Defer writing back top as much as possible.
-      Node* top = graph()->NewNode(machine()->IntAdd(), state->top(),
-                                   jsgraph()->IntPtrConstant(object_size));
-      effect = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+      Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size));
+      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                   kNoWriteBarrier),
+               top_address, __ IntPtrConstant(0), top);
 
       // Compute the effective inner allocated address.
-      value = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), state->top(),
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+      value = __ BitcastWordToTagged(
+          __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag)));
 
       // Extend the allocation {group}.
       group->Add(value);
       state = AllocationState::Open(group, state_size, top, zone());
     } else {
+      auto call_runtime = __ MakeDeferredLabel<1>();
+      auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
+
       // Setup a mutable reservation size node; will be patched as we fold
       // additional allocations into this new group.
-      Node* size = graph()->NewNode(common()->Int32Constant(object_size));
+      Node* size = __ UniqueInt32Constant(object_size);
 
       // Load allocation top and limit.
-      Node* top = effect =
-          graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
-                           jsgraph()->IntPtrConstant(0), effect, control);
-      Node* limit = effect = graph()->NewNode(
-          machine()->Load(MachineType::Pointer()), limit_address,
-          jsgraph()->IntPtrConstant(0), effect, control);
+      Node* top =
+          __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+      Node* limit =
+          __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
 
       // Check if we need to collect garbage before we can start bump pointer
       // allocation (always done for folded allocations).
-      Node* check = graph()->NewNode(
-          machine()->UintLessThan(),
-          graph()->NewNode(
-              machine()->IntAdd(), top,
-              machine()->Is64()
-                  ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
-                  : size),
+      Node* check = __ UintLessThan(
+          __ IntAdd(top,
+                    machine()->Is64() ? __ ChangeInt32ToInt64(size) : size),
           limit);
-      Node* branch =
-          graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
 
-      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-      Node* etrue = effect;
-      Node* vtrue = top;
+      __ GotoUnless(check, &call_runtime);
+      __ Goto(&done, top);
 
-      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-      Node* efalse = effect;
-      Node* vfalse;
+      __ Bind(&call_runtime);
       {
-        Node* target = pretenure == NOT_TENURED
-                           ? jsgraph()->AllocateInNewSpaceStubConstant()
-                           : jsgraph()->AllocateInOldSpaceStubConstant();
+        Node* target = pretenure == NOT_TENURED
+                           ? __ AllocateInNewSpaceStubConstant()
+                           : __ AllocateInOldSpaceStubConstant();
         if (!allocate_operator_.is_set()) {
           CallDescriptor* descriptor =
               Linkage::GetAllocateCallDescriptor(graph()->zone());
           allocate_operator_.set(common()->Call(descriptor));
         }
-        vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target,
-                                           size, efalse, if_false);
-        vfalse = graph()->NewNode(machine()->IntSub(), vfalse,
-                                  jsgraph()->IntPtrConstant(kHeapObjectTag));
+        Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+        vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
+        __ Goto(&done, vfalse);
       }
 
-      control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-      effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-      value = graph()->NewNode(
-          common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse,
-          control);
+      __ Bind(&done);
 
       // Compute the new top and write it back.
-      top = graph()->NewNode(machine()->IntAdd(), value,
-                             jsgraph()->IntPtrConstant(object_size));
-      effect = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), top, effect, control);
+      top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size));
+      __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                   kNoWriteBarrier),
+               top_address, __ IntPtrConstant(0), top);
 
       // Compute the initial object address.
-      value = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), value,
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+      value = __ BitcastWordToTagged(
+          __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag)));
 
       // Start a new allocation group.
       AllocationGroup* group =
@@ -261,61 +249,42 @@
       state = AllocationState::Open(group, object_size, top, zone());
     }
   } else {
+    auto call_runtime = __ MakeDeferredLabel<1>();
+    auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedPointer);
+
     // Load allocation top and limit.
-    Node* top = effect =
-        graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address,
-                         jsgraph()->IntPtrConstant(0), effect, control);
-    Node* limit = effect =
-        graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address,
-                         jsgraph()->IntPtrConstant(0), effect, control);
+    Node* top =
+        __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0));
+    Node* limit =
+        __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0));
 
     // Compute the new top.
-    Node* new_top = graph()->NewNode(
-        machine()->IntAdd(), top,
-        machine()->Is64()
-            ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size)
-            : size);
+    Node* new_top =
+        __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size);
 
     // Check if we can do bump pointer allocation here.
-    Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit);
-    Node* branch =
-        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+    Node* check = __ UintLessThan(new_top, limit);
+    __ GotoUnless(check, &call_runtime);
+    __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
+                                 kNoWriteBarrier),
+             top_address, __ IntPtrConstant(0), new_top);
+    __ Goto(&done, __ BitcastWordToTagged(
+                       __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
 
-    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
-    Node* etrue = effect;
-    Node* vtrue;
-    {
-      etrue = graph()->NewNode(
-          machine()->Store(StoreRepresentation(
-              MachineType::PointerRepresentation(), kNoWriteBarrier)),
-          top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true);
-      vtrue = graph()->NewNode(
-          machine()->BitcastWordToTagged(),
-          graph()->NewNode(machine()->IntAdd(), top,
-                           jsgraph()->IntPtrConstant(kHeapObjectTag)));
+    __ Bind(&call_runtime);
+    Node* target = pretenure == NOT_TENURED
+                       ? __ AllocateInNewSpaceStubConstant()
+                       : __ AllocateInOldSpaceStubConstant();
+    if (!allocate_operator_.is_set()) {
+      CallDescriptor* descriptor =
+          Linkage::GetAllocateCallDescriptor(graph()->zone());
+      allocate_operator_.set(common()->Call(descriptor));
     }
+    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
 
-    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
-    Node* efalse = effect;
-    Node* vfalse;
-    {
-      Node* target = pretenure == NOT_TENURED
-                         ? jsgraph()->AllocateInNewSpaceStubConstant()
-                         : jsgraph()->AllocateInOldSpaceStubConstant();
-      if (!allocate_operator_.is_set()) {
-        CallDescriptor* descriptor =
-            Linkage::GetAllocateCallDescriptor(graph()->zone());
-        allocate_operator_.set(common()->Call(descriptor));
-      }
-      vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size,
-                                         efalse, if_false);
-    }
-
-    control = graph()->NewNode(common()->Merge(2), if_true, if_false);
-    effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control);
-    value = graph()->NewNode(
-        common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse,
-        control);
+    __ Bind(&done);
+    value = done.PhiAt(0);
 
     // Create an unfoldable allocation group.
     AllocationGroup* group =
@@ -323,6 +292,10 @@
     state = AllocationState::Closed(group, zone());
   }
 
+  effect = __ ExtractCurrentEffect();
+  control = __ ExtractCurrentControl();
+  USE(control);  // Floating control, dropped on the floor.
+
   // Replace all effect uses of {node} with the {effect}, enqueue the
   // effect uses for further processing, and replace all value uses of
   // {node} with the {value}.
@@ -340,6 +313,8 @@
   node->Kill();
 }
 
+#undef __
+
 void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) {
   DCHECK_EQ(IrOpcode::kCall, node->opcode());
   // If the call can allocate, we start with a fresh state.
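
Aside (sketch of the GraphAssembler idiom adopted above; check, fast_value,
and slow_value are stand-in names): each MakeLabel/GotoUnless/Goto/Bind
sequence replaces a hand-built Branch/IfTrue/IfFalse/Merge/EffectPhi/Phi
cluster:

    auto slow = __ MakeDeferredLabel<1>();
    auto done = __ MakeLabel<2>(MachineType::PointerRepresentation());
    __ GotoUnless(check, &slow);   // fast path falls through
    __ Goto(&done, fast_value);
    __ Bind(&slow);
    __ Goto(&done, slow_value);
    __ Bind(&done);
    Node* result = done.PhiAt(0);  // merged value
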
diff --git a/src/compiler/memory-optimizer.h b/src/compiler/memory-optimizer.h
index ba1d6dd..1541d22 100644
--- a/src/compiler/memory-optimizer.h
+++ b/src/compiler/memory-optimizer.h
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_MEMORY_OPTIMIZER_H_
 #define V8_COMPILER_MEMORY_OPTIMIZER_H_
 
+#include "src/compiler/graph-assembler.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
@@ -131,6 +132,7 @@
   CommonOperatorBuilder* common() const;
   MachineOperatorBuilder* machine() const;
   Zone* zone() const { return zone_; }
+  GraphAssembler* gasm() { return &graph_assembler_; }
 
   SetOncePointer<const Operator> allocate_operator_;
   JSGraph* const jsgraph_;
@@ -138,6 +140,7 @@
   ZoneMap<NodeId, AllocationStates> pending_;
   ZoneQueue<Token> tokens_;
   Zone* const zone_;
+  GraphAssembler graph_assembler_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryOptimizer);
 };
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips/code-generator-mips.cc
index 0a62b52..db4b529 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips/code-generator-mips.cc
@@ -270,6 +270,26 @@
   bool must_save_lr_;
 };
 
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
+  class ool_name final : public OutOfLineCode {                      \
+   public:                                                           \
+    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
+        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+                                                                     \
+    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
+                                                                     \
+   private:                                                          \
+    T const dst_;                                                    \
+    T const src1_;                                                   \
+    T const src2_;                                                   \
+  }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, DoubleRegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, DoubleRegister);
+
+#undef CREATE_OOL_CLASS
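+// For example, CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine,
+// FPURegister) defines an OutOfLineCode subclass whose Generate() emits
+// __ Float32MaxOutOfLine(dst_, src1_, src2_). The inline fast path calls
+// __ Float32Max(...) and branches here only for the NaN case that the
+// deleted MaxNaNCheck_s/MinNaNCheck_s sequences below used to handle inline.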
 
 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
   switch (condition) {
@@ -542,7 +562,7 @@
   // Check if current frame is an arguments adaptor frame.
   __ lw(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ Branch(&done, ne, scratch1,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Load arguments count from current arguments adaptor frame (note, it
   // does not include receiver).
@@ -712,10 +732,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1132,36 +1150,24 @@
                i.InputDoubleRegister(1));
       break;
     case kMipsMaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMipsMsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMipsMsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMipsMulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1239,47 +1245,39 @@
       break;
     }
     case kMipsFloat32Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+      __ Float32Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat64Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      DoubleRegister dst = i.OutputDoubleRegister();
+      DoubleRegister src1 = i.InputDoubleRegister(0);
+      DoubleRegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+      __ Float64Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat32Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+      __ Float32Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsFloat64Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      DoubleRegister dst = i.OutputDoubleRegister();
+      DoubleRegister src1 = i.InputDoubleRegister(0);
+      DoubleRegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+      __ Float64Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMipsCvtSD: {
@@ -1628,12 +1626,12 @@
   return false;
 }
 
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+                            Instruction* instr, FlagsCondition condition,
+                            Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
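+// Within this helper, the __ macro expands to masm-> so the code below can
+// target the MacroAssembler passed in by the caller; it is redefined back to
+// masm() at the bottom of the function.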
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  MipsOperandConverter i(this, instr);
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
   Condition cc = kNoCondition;
   // MIPS does not have condition code flags, so compare and branch are
   // implemented differently than on other architectures. The compare operations
@@ -1642,12 +1640,13 @@
   // registers to compare pseudo-op are not modified before this branch op, as
   // they are tested here.
 
+  MipsOperandConverter i(gen, instr);
   if (instr->arch_opcode() == kMipsTst) {
-    cc = FlagsConditionToConditionTst(branch->condition);
+    cc = FlagsConditionToConditionTst(condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(tlabel, cc, at, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMipsAddOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ AddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1657,11 +1656,11 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsAddOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsSubOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ SubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1671,11 +1670,11 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsSubOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsMulOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel);
@@ -1685,15 +1684,15 @@
                         i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMipsMulOvf, branch->condition);
+        UNSUPPORTED_COND(kMipsMulOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMipsCmp) {
-    cc = FlagsConditionToConditionCmp(branch->condition);
+    cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
   } else if (instr->arch_opcode() == kMipsCmpS) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpS, condition);
     }
     FPURegister left = i.InputOrZeroSingleRegister(0);
     FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -1703,8 +1702,8 @@
     }
     __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMipsCmpD) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMipsCmpD, condition);
     }
     FPURegister left = i.InputOrZeroDoubleRegister(0);
     FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -1718,7 +1717,17 @@
            instr->arch_opcode());
     UNIMPLEMENTED();
   }
-  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+                         branch->fallthru);
 }
 
 
@@ -1726,6 +1735,68 @@
   if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      MipsOperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -1915,13 +1986,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2080,9 +2154,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ li(dst, Operand(src.ToInt32()));
diff --git a/src/compiler/mips/instruction-codes-mips.h b/src/compiler/mips/instruction-codes-mips.h
index 45ed041..edff56f 100644
--- a/src/compiler/mips/instruction-codes-mips.h
+++ b/src/compiler/mips/instruction-codes-mips.h
@@ -71,12 +71,8 @@
   V(MipsMulPair)                   \
   V(MipsMaddS)                     \
   V(MipsMaddD)                     \
-  V(MipsMaddfS)                    \
-  V(MipsMaddfD)                    \
   V(MipsMsubS)                     \
   V(MipsMsubD)                     \
-  V(MipsMsubfS)                    \
-  V(MipsMsubfD)                    \
   V(MipsFloat32RoundDown)          \
   V(MipsFloat32RoundTruncate)      \
   V(MipsFloat32RoundUp)            \
diff --git a/src/compiler/mips/instruction-selector-mips.cc b/src/compiler/mips/instruction-selector-mips.cc
index 1e4b996..d0ceac1 100644
--- a/src/compiler/mips/instruction-selector-mips.cc
+++ b/src/compiler/mips/instruction-selector-mips.cc
@@ -173,10 +173,9 @@
                         &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
-  }
-  if (has_reverse_opcode &&
-      TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
-                        &input_count, &inputs[1])) {
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.right().node());
     opcode = reverse_opcode;
     input_count++;
@@ -188,6 +187,8 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.TempImmediate(cont->trap_id());
   }
 
   if (cont->IsDeoptimize()) {
@@ -210,7 +211,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -263,6 +264,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -348,6 +352,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -368,6 +375,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitWord32And(Node* node) {
   MipsOperandGenerator g(this);
@@ -394,9 +405,13 @@
         // zeros.
         if (lsb + mask_width > 32) mask_width = 32 - lsb;
 
-        Emit(kMipsExt, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(mask_width));
+        if (lsb == 0 && mask_width == 32) {
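+          // With lsb == 0 and a full 32-bit mask the extract keeps every
+          // bit, so forward the input instead of emitting an Ext.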
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+        } else {
+          Emit(kMipsExt, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+               g.TempImmediate(mask_width));
+        }
         return;
       }
       // Other cases fall through to the normal And operation.
@@ -652,7 +667,7 @@
   if (m.right().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -664,7 +679,7 @@
   if (m.left().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMipsLsa, g.DefineAsRegister(node), g.UseRegister(m.right().node()),
            g.UseRegister(mleft.left().node()), g.TempImmediate(shift_value));
@@ -900,35 +915,23 @@
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(z, x, y).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMipsMaddS, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.S(x, y, z).
-      Emit(kMipsMaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddS, node);
@@ -937,35 +940,23 @@
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(z, x, y).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (IsMipsArchVariant(kMips32r2)) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMipsMaddD, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (IsMipsArchVariant(kMips32r6)) {  // Select Maddf.D(x, y, z).
-      Emit(kMipsMaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMipsAddD, node);
@@ -974,9 +965,9 @@
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubS, g.DefineAsRegister(node),
@@ -984,24 +975,15 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   MipsOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (IsMipsArchVariant(kMips32r2)) {
+  if (IsMipsArchVariant(kMips32r2)) {  // Select Msub.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMipsMsubD, g.DefineAsRegister(node),
@@ -1009,15 +991,6 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (IsMipsArchVariant(kMips32r6)) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMipsMsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMipsSubD, node);
 }
@@ -1231,6 +1204,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1281,6 +1257,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1329,6 +1308,9 @@
     case MachineRepresentation::kTagged:   // Fall through.
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1404,11 +1386,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.TempImmediate(cont->trap_id()));
   }
 }
 
@@ -1614,12 +1599,15 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->reason(),
+                             g.TempImmediate(0), cont->kind(), cont->reason(),
                              cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+                   g.TempImmediate(cont->trap_id()));
   }
 }
 
@@ -1632,14 +1620,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
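
Aside (quoting the hunks above, to trace the trap id): the selector appends
the id as the instruction's final immediate input, and the code generator's
AssembleArchTrap decodes it from the same position:

    inputs[input_count++] = g.TempImmediate(cont->trap_id());
    ...
    Builtins::Name trap_id =
        static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));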
 
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index a3bf433..3ab85e0 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -270,6 +270,26 @@
   bool must_save_lr_;
 };
 
+#define CREATE_OOL_CLASS(ool_name, masm_ool_name, T)                 \
+  class ool_name final : public OutOfLineCode {                      \
+   public:                                                           \
+    ool_name(CodeGenerator* gen, T dst, T src1, T src2)              \
+        : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+                                                                     \
+    void Generate() final { __ masm_ool_name(dst_, src1_, src2_); }  \
+                                                                     \
+   private:                                                          \
+    T const dst_;                                                    \
+    T const src1_;                                                   \
+    T const src2_;                                                   \
+  }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
 
 Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
   switch (condition) {
@@ -366,85 +386,108 @@
 }
 
 }  // namespace
-
-#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+#define ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, length, out_of_bounds)         \
   do {                                                                        \
-    auto result = i.Output##width##Register();                                \
-    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
-    if (instr->InputAt(0)->IsRegister()) {                                    \
-      auto offset = i.InputRegister(0);                                       \
-      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
-      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
-      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
+    if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+      __ And(kScratchReg, offset, Operand(~(length.immediate() - 1)));        \
+      __ Branch(USE_DELAY_SLOT, out_of_bounds, ne, kScratchReg,               \
+                Operand(zero_reg));                                           \
     } else {                                                                  \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
-      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+      __ Branch(USE_DELAY_SLOT, out_of_bounds, hs, offset, length);           \
     }                                                                         \
-    __ bind(ool->exit());                                                     \
   } while (0)
 
-#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+#define ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, length, out_of_bounds)        \
   do {                                                                        \
-    auto result = i.OutputRegister();                                         \
-    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
-    if (instr->InputAt(0)->IsRegister()) {                                    \
-      auto offset = i.InputRegister(0);                                       \
-      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                       \
-      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                 \
-      __ asm_instr(result, MemOperand(kScratchReg, 0));                       \
+    if (!length.is_reg() && base::bits::IsPowerOfTwo64(length.immediate())) { \
+      __ Or(kScratchReg, zero_reg, Operand(offset));                          \
+      __ And(kScratchReg, kScratchReg, Operand(~(length.immediate() - 1)));   \
+      __ Branch(out_of_bounds, ne, kScratchReg, Operand(zero_reg));           \
     } else {                                                                  \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());           \
-      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+      __ Branch(out_of_bounds, ls, length.rm(), Operand(offset));             \
     }                                                                         \
-    __ bind(ool->exit());                                                     \
   } while (0)
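
The two macros above split the old inline bounds check out of the load/store
macros and add a fast path: when the length is a compile-time power of two, an
unsigned offset is in bounds exactly when it has no bits set at or above
log2(length), so the unsigned compare-and-branch collapses to an AND plus a
branch-on-nonzero. A standalone illustration of the invariant:

    // offset < length  <=>  (offset & ~(length - 1)) == 0, for power-of-two
    // length and unsigned offset (cf. ASSEMBLE_BOUNDS_CHECK_REGISTER above).
    #include <cassert>
    #include <cstdint>

    bool InBounds(uint64_t offset, uint64_t length_pow2) {
      return (offset & ~(length_pow2 - 1)) == 0;
    }

    int main() {
      assert(InBounds(0, 0x1000));
      assert(InBounds(0xFFF, 0x1000));
      assert(!InBounds(0x1000, 0x1000));  // one past the end
      assert(!InBounds(UINT64_MAX, 0x1000));
      return 0;
    }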
 
-#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
-  do {                                                                 \
-    Label done;                                                        \
-    if (instr->InputAt(0)->IsRegister()) {                             \
-      auto offset = i.InputRegister(0);                                \
-      auto value = i.InputOrZero##width##Register(2);                  \
-      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
-        __ Move(kDoubleRegZero, 0.0);                                  \
-      }                                                                \
-      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                \
-      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
-      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
-    } else {                                                           \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.InputOrZero##width##Register(2);                  \
-      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {      \
-        __ Move(kDoubleRegZero, 0.0);                                  \
-      }                                                                \
-      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
-    }                                                                  \
-    __ bind(&done);                                                    \
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                          \
+  do {                                                                         \
+    auto result = i.Output##width##Register();                                 \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);                \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
+      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
+    } else {                                                                   \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
+                                      ool->entry());                           \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
+    }                                                                          \
+    __ bind(ool->exit());                                                      \
   } while (0)
 
-#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
-  do {                                                                 \
-    Label done;                                                        \
-    if (instr->InputAt(0)->IsRegister()) {                             \
-      auto offset = i.InputRegister(0);                                \
-      auto value = i.InputOrZeroRegister(2);                           \
-      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
-      __ And(kScratchReg, offset, Operand(0xffffffff));                \
-      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);          \
-      __ asm_instr(value, MemOperand(kScratchReg, 0));                 \
-    } else {                                                           \
-      int offset = static_cast<int>(i.InputOperand(0).immediate());    \
-      auto value = i.InputOrZeroRegister(2);                           \
-      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
-      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
-    }                                                                  \
-    __ bind(&done);                                                    \
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);                \
+    if (instr->InputAt(0)->IsRegister()) {                                     \
+      auto offset = i.InputRegister(0);                                        \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), ool->entry()); \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                        \
+      __ Daddu(kScratchReg, i.InputRegister(2), kScratchReg);                  \
+      __ asm_instr(result, MemOperand(kScratchReg, 0));                        \
+    } else {                                                                   \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());            \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1),               \
+                                      ool->entry());                           \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));            \
+    }                                                                          \
+    __ bind(ool->exit());                                                      \
+  } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                   \
+  do {                                                                   \
+    Label done;                                                          \
+    if (instr->InputAt(0)->IsRegister()) {                               \
+      auto offset = i.InputRegister(0);                                  \
+      auto value = i.InputOrZero##width##Register(2);                    \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
+        __ Move(kDoubleRegZero, 0.0);                                    \
+      }                                                                  \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
+      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
+    } else {                                                             \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
+      auto value = i.InputOrZero##width##Register(2);                    \
+      if (value.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {        \
+        __ Move(kDoubleRegZero, 0.0);                                    \
+      }                                                                  \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
+    }                                                                    \
+    __ bind(&done);                                                      \
+  } while (0)
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                        \
+  do {                                                                   \
+    Label done;                                                          \
+    if (instr->InputAt(0)->IsRegister()) {                               \
+      auto offset = i.InputRegister(0);                                  \
+      auto value = i.InputOrZeroRegister(2);                             \
+      ASSEMBLE_BOUNDS_CHECK_REGISTER(offset, i.InputOperand(1), &done);  \
+      __ And(kScratchReg, offset, Operand(0xffffffff));                  \
+      __ Daddu(kScratchReg, i.InputRegister(3), kScratchReg);            \
+      __ asm_instr(value, MemOperand(kScratchReg, 0));                   \
+    } else {                                                             \
+      int offset = static_cast<int>(i.InputOperand(0).immediate());      \
+      auto value = i.InputOrZeroRegister(2);                             \
+      ASSEMBLE_BOUNDS_CHECK_IMMEDIATE(offset, i.InputOperand(1), &done); \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));       \
+    }                                                                    \
+    __ bind(&done);                                                      \
   } while (0)
 
 #define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(mode)                                  \
@@ -556,7 +599,7 @@
   // Check if current frame is an arguments adaptor frame.
   __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ Branch(&done, ne, scratch3,
-            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+            Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
 
   // Load arguments count from current arguments adaptor frame (note that it
   // does not include the receiver).
@@ -725,10 +768,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1326,36 +1367,24 @@
                i.InputDoubleRegister(1));
       break;
     case kMips64MaddS:
-      __ madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Madd_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MaddD:
-      __ madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MaddfS:
-      __ maddf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MaddfD:
-      __ maddf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Madd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubS:
-      __ msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
-                i.InputFloatRegister(1), i.InputFloatRegister(2));
+      __ Msub_s(i.OutputFloatRegister(), i.InputFloatRegister(0),
+                i.InputFloatRegister(1), i.InputFloatRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MsubD:
-      __ msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                i.InputDoubleRegister(1), i.InputDoubleRegister(2));
-      break;
-    case kMips64MsubfS:
-      __ msubf_s(i.OutputFloatRegister(), i.InputFloatRegister(1),
-                 i.InputFloatRegister(2));
-      break;
-    case kMips64MsubfD:
-      __ msubf_d(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
-                 i.InputDoubleRegister(2));
+      __ Msub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputDoubleRegister(1), i.InputDoubleRegister(2),
+                kScratchDoubleReg);
       break;
     case kMips64MulD:
       // TODO(plind): add special case: right op is -1.0, see arm port.
@@ -1430,47 +1459,39 @@
       break;
     }
     case kMips64Float32Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Max(this, dst, src1, src2);
+      __ Float32Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64Max: {
-      Label compare_nan, done_compare;
-      __ MaxNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputDoubleRegister();
+      FPURegister src1 = i.InputDoubleRegister(0);
+      FPURegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Max(this, dst, src1, src2);
+      __ Float64Max(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float32Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_s(i.OutputSingleRegister(), i.InputSingleRegister(0),
-                       i.InputSingleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputSingleRegister(),
-              std::numeric_limits<float>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputSingleRegister();
+      FPURegister src1 = i.InputSingleRegister(0);
+      FPURegister src2 = i.InputSingleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat32Min(this, dst, src1, src2);
+      __ Float32Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64Min: {
-      Label compare_nan, done_compare;
-      __ MinNaNCheck_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
-                       i.InputDoubleRegister(1), &compare_nan);
-      __ Branch(&done_compare);
-      __ bind(&compare_nan);
-      __ Move(i.OutputDoubleRegister(),
-              std::numeric_limits<double>::quiet_NaN());
-      __ bind(&done_compare);
+      FPURegister dst = i.OutputDoubleRegister();
+      FPURegister src1 = i.InputDoubleRegister(0);
+      FPURegister src2 = i.InputDoubleRegister(1);
+      auto ool = new (zone()) OutOfLineFloat64Min(this, dst, src1, src2);
+      __ Float64Min(dst, src1, src2, ool->entry());
+      __ bind(ool->exit());
       break;
     }
     case kMips64Float64SilenceNaN:
@@ -1935,12 +1956,13 @@
   return false;
 }
 
+void AssembleBranchToLabels(CodeGenerator* gen, MacroAssembler* masm,
+                            Instruction* instr, FlagsCondition condition,
+                            Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ masm->
+  MipsOperandConverter i(gen, instr);
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  MipsOperandConverter i(this, instr);
-  Label* tlabel = branch->true_label;
-  Label* flabel = branch->false_label;
   Condition cc = kNoCondition;
   // MIPS does not have condition code flags, so compare and branch are
   // implemented differently than on other architectures. The compare operations
@@ -1950,17 +1972,17 @@
   // they are tested here.
 
   if (instr->arch_opcode() == kMips64Tst) {
-    cc = FlagsConditionToConditionTst(branch->condition);
+    cc = FlagsConditionToConditionTst(condition);
     __ And(at, i.InputRegister(0), i.InputOperand(1));
     __ Branch(tlabel, cc, at, Operand(zero_reg));
   } else if (instr->arch_opcode() == kMips64Dadd ||
              instr->arch_opcode() == kMips64Dsub) {
-    cc = FlagsConditionToConditionOvf(branch->condition);
+    cc = FlagsConditionToConditionOvf(condition);
     __ dsra32(kScratchReg, i.OutputRegister(), 0);
     __ sra(at, i.OutputRegister(), 31);
     __ Branch(tlabel, cc, at, Operand(kScratchReg));
   } else if (instr->arch_opcode() == kMips64DaddOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ DaddBranchOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), tlabel, flabel);
@@ -1970,11 +1992,11 @@
                          i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMips64DaddOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64DaddOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64DsubOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow:
         __ DsubBranchOvf(i.OutputRegister(), i.InputRegister(0),
                          i.InputOperand(1), tlabel, flabel);
@@ -1984,11 +2006,11 @@
                          i.InputOperand(1), flabel, tlabel);
         break;
       default:
-        UNSUPPORTED_COND(kMips64DsubOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64DsubOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64MulOvf) {
-    switch (branch->condition) {
+    switch (condition) {
       case kOverflow: {
         __ MulBranchOvf(i.OutputRegister(), i.InputRegister(0),
                         i.InputOperand(1), tlabel, flabel, kScratchReg);
@@ -1998,15 +2020,15 @@
                         i.InputOperand(1), flabel, tlabel, kScratchReg);
       } break;
       default:
-        UNSUPPORTED_COND(kMips64MulOvf, branch->condition);
+        UNSUPPORTED_COND(kMips64MulOvf, condition);
         break;
     }
   } else if (instr->arch_opcode() == kMips64Cmp) {
-    cc = FlagsConditionToConditionCmp(branch->condition);
+    cc = FlagsConditionToConditionCmp(condition);
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
   } else if (instr->arch_opcode() == kMips64CmpS) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpS, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMips64CmpS, condition);
     }
     FPURegister left = i.InputOrZeroSingleRegister(0);
     FPURegister right = i.InputOrZeroSingleRegister(1);
@@ -2016,8 +2038,8 @@
     }
     __ BranchF32(tlabel, nullptr, cc, left, right);
   } else if (instr->arch_opcode() == kMips64CmpD) {
-    if (!convertCondition(branch->condition, cc)) {
-      UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+    if (!convertCondition(condition, cc)) {
+      UNSUPPORTED_COND(kMips64CmpD, condition);
     }
     FPURegister left = i.InputOrZeroDoubleRegister(0);
     FPURegister right = i.InputOrZeroDoubleRegister(1);
@@ -2031,7 +2053,18 @@
            instr->arch_opcode());
     UNIMPLEMENTED();
   }
-  if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
+#undef __
+#define __ masm()->
+}
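
Note the `__` shorthand trick that makes the refactoring above work:
AssembleBranchToLabels is a free function, so it cannot use the default
`#define __ masm()->` binding available to CodeGenerator methods. The macro is
rebound to the masm parameter for the body and restored afterwards:

    #undef __
    #define __ masm->
    /* ... emit instructions via __ exactly as before ... */
    #undef __
    #define __ masm()->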
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+
+  AssembleBranchToLabels(this, masm(), instr, branch->condition, tlabel, flabel,
+                         branch->fallthru);
 }
 
 
@@ -2039,6 +2072,65 @@
   if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+    void Generate() final {
+      MipsOperandConverter i(gen_, instr_);
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  AssembleBranchToLabels(this, masm(), instr, condition, tlabel, nullptr, true);
+}
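
AssembleArchTrap pairs with the selector changes above: the trap id travels as
the last immediate input of the compare instruction and is read back with
i.InputInt32(instr_->InputCount() - 1). One value past the last real builtin
(Builtins::builtin_count) is reserved as a test sentinel, because
cctest/test-run-wasm cannot call into the runtime; that path calls a C
callback and returns instead of calling a trap builtin and recording a
safepoint. A standalone model of the dispatch (builtin names here are
illustrative; only the sentinel convention mirrors the code above):

    #include <cstdio>

    enum TrapBuiltin : int {
      kTrapUnreachable,
      kTrapMemOutOfBounds,
      kBuiltinCount  // plays the role of Builtins::builtin_count
    };

    void CallTrap(int trap_id) {
      if (trap_id == kBuiltinCount) {
        // Test-only path: call a C function, tear down the frame, return.
        std::puts("call wasm_call_trap_callback_for_testing");
      } else {
        // Production path: call the builtin and record a safepoint.
        std::printf("call trap builtin %d\n", trap_id);
      }
    }

    int main() {
      CallTrap(kTrapMemOutOfBounds);
      CallTrap(kBuiltinCount);
      return 0;
    }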
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2239,13 +2331,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
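
With the MiscField encoding gone, the bailout type is now derived from the
deopt entry's DeoptimizeKind. Isolated from the code above, the mapping is
simply:

    // Soft deopts keep the SOFT bailout type; everything else (currently the
    // eager kind) becomes EAGER.
    Deoptimizer::BailoutType ToBailoutType(DeoptimizeKind kind) {
      return kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
                                           : Deoptimizer::EAGER;
    }
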
@@ -2401,7 +2496,7 @@
           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
       switch (src.type()) {
         case Constant::kInt32:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
             __ li(dst, Operand(src.ToInt32()));
@@ -2411,11 +2506,10 @@
           __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
           break;
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ li(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
             __ li(dst, Operand(src.ToInt64()));
           }
           break;
diff --git a/src/compiler/mips64/instruction-codes-mips64.h b/src/compiler/mips64/instruction-codes-mips64.h
index 8f68ced..0c0e1aa 100644
--- a/src/compiler/mips64/instruction-codes-mips64.h
+++ b/src/compiler/mips64/instruction-codes-mips64.h
@@ -87,12 +87,8 @@
   V(Mips64MinD)                     \
   V(Mips64MaddS)                    \
   V(Mips64MaddD)                    \
-  V(Mips64MaddfS)                   \
-  V(Mips64MaddfD)                   \
   V(Mips64MsubS)                    \
   V(Mips64MsubD)                    \
-  V(Mips64MsubfS)                   \
-  V(Mips64MsubfD)                   \
   V(Mips64Float64RoundDown)         \
   V(Mips64Float64RoundTruncate)     \
   V(Mips64Float64RoundUp)           \
diff --git a/src/compiler/mips64/instruction-selector-mips64.cc b/src/compiler/mips64/instruction-selector-mips64.cc
index fbf09d6..4f19a17 100644
--- a/src/compiler/mips64/instruction-selector-mips64.cc
+++ b/src/compiler/mips64/instruction-selector-mips64.cc
@@ -92,9 +92,35 @@
       case kMips64Tst:
       case kMips64Xor:
         return is_uint16(value);
+      case kMips64Lb:
+      case kMips64Lbu:
+      case kMips64Sb:
+      case kMips64Lh:
+      case kMips64Lhu:
+      case kMips64Sh:
+      case kMips64Lw:
+      case kMips64Sw:
+      case kMips64Ld:
+      case kMips64Sd:
+      case kMips64Lwc1:
+      case kMips64Swc1:
       case kMips64Ldc1:
       case kMips64Sdc1:
-        return is_int16(value + kIntSize);
+      case kCheckedLoadInt8:
+      case kCheckedLoadUint8:
+      case kCheckedLoadInt16:
+      case kCheckedLoadUint16:
+      case kCheckedLoadWord32:
+      case kCheckedLoadWord64:
+      case kCheckedStoreWord8:
+      case kCheckedStoreWord16:
+      case kCheckedStoreWord32:
+      case kCheckedStoreWord64:
+      case kCheckedLoadFloat32:
+      case kCheckedLoadFloat64:
+      case kCheckedStoreFloat32:
+      case kCheckedStoreFloat64:
+        return is_int32(value);
       default:
         return is_int16(value);
     }
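
Previously CanBeImmediate accepted only 16-bit signed offsets for memory
operations (the reach of a single MIPS load/store), with the Ldc1/Sdc1 bound
tightened by kIntSize so the second word of a split doubleword access also
stayed in range. All plain and checked memory opcodes now advertise the full
32-bit range; offsets beyond 16 bits are presumably materialized into a
scratch register by the macro assembler rather than rejected by the selector.
The two predicates, re-implemented standalone for illustration:

    #include <cassert>
    #include <cstdint>

    bool is_int16(int64_t v) { return v >= -(1 << 15) && v < (1 << 15); }
    bool is_int32(int64_t v) {
      return v >= -(int64_t{1} << 31) && v < (int64_t{1} << 31);
    }

    int main() {
      assert(is_int16(0x7FFF) && !is_int16(0x8000));
      assert(is_int32(0x8000));  // now foldable for loads and stores
      assert(!is_int32(int64_t{1} << 31));
      return 0;
    }
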
@@ -169,6 +195,16 @@
     DCHECK(m.IsWord64Sar());
     if (m.left().IsLoad() && m.right().Is(32) &&
         selector_->CanCover(m.node(), m.left().node())) {
+      MachineRepresentation rep =
+          LoadRepresentationOf(m.left().node()->op()).representation();
+      DCHECK(ElementSizeLog2Of(rep) == 3);
+      if (rep != MachineRepresentation::kTaggedSigned &&
+          rep != MachineRepresentation::kTaggedPointer &&
+          rep != MachineRepresentation::kTagged &&
+          rep != MachineRepresentation::kWord64) {
+        return;
+      }
+
       Mips64OperandGenerator g(selector_);
       Node* load = m.left().node();
       Node* offset = load->InputAt(1);
@@ -186,7 +222,8 @@
   }
 };
 
-bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node) {
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+                          Node* output_node) {
   ExtendingLoadMatcher m(node, selector);
   Mips64OperandGenerator g(selector);
   if (m.Matches()) {
@@ -196,7 +233,7 @@
         m.opcode() | AddressingModeField::encode(kMode_MRI);
     DCHECK(is_int32(m.immediate()));
     inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
-    InstructionOperand outputs[] = {g.DefineAsRegister(node)};
+    InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
     selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
                    inputs);
     return true;
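
TryEmitExtendingLoad folds the idiom "load a 64-bit value, then Word64Sar by
32" into a single sign-extending 32-bit load of the upper word. The guard
added to the matcher above bails out unless the load really is a full 64-bit
(tagged or Word64) load, and the new output_node parameter lets the same match
be driven from TruncateInt64ToInt32, where the truncation node rather than the
shift must receive the result register:

    // Patterns folded by TryEmitExtendingLoad (result placement differs):
    //   Word64Sar(Load[base + k], 32)
    //       -> one extending load; result defined on the Sar node
    //   TruncateInt64ToInt32(Word64Sar(Load[base + k], 32))
    //       -> one extending load; result defined on the Truncate node
    //          (see the kWord64Sar case in VisitTruncateInt64ToInt32 below)
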
@@ -232,10 +269,9 @@
                         &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
-  }
-  if (has_reverse_opcode &&
-      TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
-                        &input_count, &inputs[1])) {
+  } else if (has_reverse_opcode &&
+             TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+                               &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.right().node());
     opcode = reverse_opcode;
     input_count++;
@@ -247,6 +283,8 @@
   if (cont->IsBranch()) {
     inputs[input_count++] = g.Label(cont->true_block());
     inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.TempImmediate(cont->trap_id());
   }
 
   if (cont->IsDeoptimize()) {
@@ -269,7 +307,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -341,6 +379,9 @@
       opcode = kMips64Ld;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -418,6 +459,9 @@
         opcode = kMips64Sd;
         break;
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -438,6 +482,10 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
 
 void InstructionSelector::VisitWord32And(Node* node) {
   Mips64OperandGenerator g(this);
@@ -514,9 +562,13 @@
         // zeros.
         if (lsb + mask_width > 64) mask_width = 64 - lsb;
 
-        Emit(kMips64Dext, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(static_cast<int32_t>(mask_width)));
+        if (lsb == 0 && mask_width == 64) {
+          Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+        } else {
+          Emit(kMips64Dext, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+               g.TempImmediate(static_cast<int32_t>(mask_width)));
+        }
         return;
       }
       // Other cases fall through to the normal And operation.
@@ -748,7 +800,7 @@
 
 
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  if (TryEmitExtendingLoad(this, node)) return;
+  if (TryEmitExtendingLoad(this, node, node)) return;
   VisitRRO(this, kMips64Dsar, node);
 }
 
@@ -824,7 +876,7 @@
   if (m.right().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int32BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMips64Lsa, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
            g.UseRegister(mright.left().node()), g.TempImmediate(shift_value));
@@ -836,7 +888,7 @@
   if (m.left().opcode() == IrOpcode::kWord32Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int32BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMips64Lsa, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -856,7 +908,7 @@
   if (m.right().opcode() == IrOpcode::kWord64Shl &&
       CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
     Int64BinopMatcher mright(m.right().node());
-    if (mright.right().HasValue()) {
+    if (mright.right().HasValue() && !m.left().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mright.right().Value());
       Emit(kMips64Dlsa, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
@@ -869,7 +921,7 @@
   if (m.left().opcode() == IrOpcode::kWord64Shl &&
       CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
     Int64BinopMatcher mleft(m.left().node());
-    if (mleft.right().HasValue()) {
+    if (mleft.right().HasValue() && !m.right().HasValue()) {
       int32_t shift_value = static_cast<int32_t>(mleft.right().Value());
       Emit(kMips64Dlsa, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
@@ -1318,13 +1370,17 @@
   if (CanCover(node, value)) {
     switch (value->opcode()) {
       case IrOpcode::kWord64Sar: {
-        Int64BinopMatcher m(value);
-        if (m.right().IsInRange(32, 63)) {
-          // After smi untagging no need for truncate. Combine sequence.
-          Emit(kMips64Dsar, g.DefineSameAsFirst(node),
-               g.UseRegister(m.left().node()),
-               g.UseImmediate(m.right().node()));
+        if (TryEmitExtendingLoad(this, value, node)) {
           return;
+        } else {
+          Int64BinopMatcher m(value);
+          if (m.right().IsInRange(32, 63)) {
+            // After smi untagging no need for truncate. Combine sequence.
+            Emit(kMips64Dsar, g.DefineSameAsFirst(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseImmediate(m.right().node()));
+            return;
+          }
         }
         break;
       }
@@ -1404,35 +1460,23 @@
 
 void InstructionSelector::VisitFloat32Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    // For Add.S(Mul.S(x, y), z):
-    Float32BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
+      // For Add.S(Mul.S(x, y), z):
+      Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(z, x, y).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    // For Add.S(x, Mul.S(y, z)):
-    Float32BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.S(x, y, z).
+    if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
+      // For Add.S(x, Mul.S(y, z)):
+      Float32BinopMatcher mright(m.right().node());
       Emit(kMips64MaddS, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.S(x, y, z).
-      Emit(kMips64MaddfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddS, node);
@@ -1441,35 +1485,23 @@
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    // For Add.D(Mul.D(x, y), z):
-    Float64BinopMatcher mleft(m.left().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+  if (kArchVariant == kMips64r2) {  // Select Madd.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+      // For Add.D(Mul.D(x, y), z):
+      Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
            g.UseRegister(mleft.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(z, x, y).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
-           g.UseRegister(mleft.right().node()));
-      return;
     }
-  }
-  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    // For Add.D(x, Mul.D(y, z)):
-    Float64BinopMatcher mright(m.right().node());
-    if (kArchVariant == kMips64r2) {  // Select Madd.D(x, y, z).
+    if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+      // For Add.D(x, Mul.D(y, z)):
+      Float64BinopMatcher mright(m.right().node());
       Emit(kMips64MaddD, g.DefineAsRegister(node),
            g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
            g.UseRegister(mright.right().node()));
       return;
-    } else if (kArchVariant == kMips64r6) {  // Select Maddf.D(x, y, z).
-      Emit(kMips64MaddfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
     }
   }
   VisitRRR(this, kMips64AddD, node);
@@ -1478,9 +1510,9 @@
 
 void InstructionSelector::VisitFloat32Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float32BinopMatcher m(node);
-  if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Msub.S(z, x, y).
+    Float32BinopMatcher m(node);
+    if (m.left().IsFloat32Mul() && CanCover(node, m.left().node())) {
       // For Sub.S(Mul.S(x,y), z) select Msub.S(z, x, y).
       Float32BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubS, g.DefineAsRegister(node),
@@ -1488,24 +1520,15 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat32Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.S(x,Mul.S(y,z)) select Msubf.S(x, y, z).
-      Float32BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfS, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubS, node);
 }
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   Mips64OperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    if (kArchVariant == kMips64r2) {
+  if (kArchVariant == kMips64r2) {  // Select Msub.D(z, x, y).
+    Float64BinopMatcher m(node);
+    if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
       // For Sub.D(Mul.D(x,y), z) select Msub.D(z, x, y).
       Float64BinopMatcher mleft(m.left().node());
       Emit(kMips64MsubD, g.DefineAsRegister(node),
@@ -1513,15 +1536,6 @@
            g.UseRegister(mleft.right().node()));
       return;
     }
-  } else if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    if (kArchVariant == kMips64r6) {
-      // For Sub.D(x,Mul.S(y,z)) select Msubf.D(x, y, z).
-      Float64BinopMatcher mright(m.right().node());
-      Emit(kMips64MsubfD, g.DefineSameAsFirst(node),
-           g.UseRegister(m.left().node()), g.UseRegister(mright.left().node()),
-           g.UseRegister(mright.right().node()));
-      return;
-    }
   }
   VisitRRR(this, kMips64SubD, node);
 }
@@ -1735,6 +1749,9 @@
       opcode = kMips64Uld;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1785,6 +1802,9 @@
       opcode = kMips64Usd;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1835,6 +1855,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:   // Fall through.
+    case MachineRepresentation::kSimd1x8:   // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1849,6 +1872,15 @@
                                                 : g.UseRegister(length)
                                           : g.UseRegister(length);
 
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.DefineAsRegister(node), offset_operand,
+           g.UseImmediate(length), g.UseRegister(buffer));
+      return;
+    }
+  }
+
   Emit(opcode | AddressingModeField::encode(kMode_MRI),
        g.DefineAsRegister(node), offset_operand, length_operand,
        g.UseRegister(buffer));
@@ -1887,6 +1919,9 @@
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:
     case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:   // Fall through.
+    case MachineRepresentation::kSimd1x8:   // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -1901,6 +1936,15 @@
                                                 : g.UseRegister(length)
                                           : g.UseRegister(length);
 
+  if (length->opcode() == IrOpcode::kInt32Constant) {
+    Int32Matcher m(length);
+    if (m.IsPowerOf2()) {
+      Emit(opcode, g.NoOutput(), offset_operand, g.UseImmediate(length),
+           g.UseRegisterOrImmediateZero(value), g.UseRegister(buffer));
+      return;
+    }
+  }
+
   Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
        offset_operand, length_operand, g.UseRegisterOrImmediateZero(value),
        g.UseRegister(buffer));
@@ -1919,11 +1963,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.TempImmediate(cont->trap_id()));
   }
 }
 
@@ -2133,8 +2180,11 @@
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, g.NoOutput(), value_operand,
-                             g.TempImmediate(0), cont->reason(),
+                             g.TempImmediate(0), cont->kind(), cont->reason(),
                              cont->frame_state());
+  } else if (cont->IsTrap()) {
+    selector->Emit(opcode, g.NoOutput(), value_operand, g.TempImmediate(0),
+                   g.TempImmediate(cont->trap_id()));
   } else {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
                    g.TempImmediate(0));
@@ -2269,14 +2319,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
diff --git a/src/compiler/node-marker.h b/src/compiler/node-marker.h
index 84666d5..e38105d 100644
--- a/src/compiler/node-marker.h
+++ b/src/compiler/node-marker.h
@@ -20,11 +20,10 @@
  public:
   NodeMarkerBase(Graph* graph, uint32_t num_states);
 
-  V8_INLINE Mark Get(Node* node) {
+  V8_INLINE Mark Get(const Node* node) {
     Mark mark = node->mark();
     if (mark < mark_min_) {
-      mark = mark_min_;
-      node->set_mark(mark_min_);
+      return 0;
     }
     DCHECK_LT(mark, mark_max_);
     return mark - mark_min_;
@@ -52,9 +51,9 @@
 // set to State(0) in constant time.
 //
 // In its current implementation, in debug mode NodeMarker will try to
-// (efficiently) detect invalid use of an older NodeMarker. Namely, if you get
-// or set a node with a NodeMarker, and then get or set that node
-// with an older NodeMarker you will get a crash.
+// (efficiently) detect invalid use of an older NodeMarker. Namely, if you set a
+// node with a NodeMarker, and then get or set that node with an older
+// NodeMarker you will get a crash.
 //
 // GraphReducer uses a NodeMarker, so individual Reducers cannot use a
 // NodeMarker.
@@ -64,7 +63,7 @@
   V8_INLINE NodeMarker(Graph* graph, uint32_t num_states)
       : NodeMarkerBase(graph, num_states) {}
 
-  V8_INLINE State Get(Node* node) {
+  V8_INLINE State Get(const Node* node) {
     return static_cast<State>(NodeMarkerBase::Get(node));
   }
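
The node-marker change above removes the write-back for stale marks: a mark
below the marker's window now simply reads as state 0 instead of being bumped
to mark_min_, which is what allows Get() to take a const Node*. The window
arithmetic, modeled standalone:

    // Each marker reserves [mark_min_, mark_max_) from a per-graph counter;
    // marks older than the window decode to the default state 0.
    #include <cassert>
    #include <cstdint>

    struct FakeNode { uint32_t mark = 0; };

    struct FakeMarker {
      uint32_t mark_min_, mark_max_;
      FakeMarker(uint32_t* graph_counter, uint32_t num_states)
          : mark_min_(*graph_counter),
            mark_max_(*graph_counter += num_states) {}
      uint32_t Get(const FakeNode* n) const {
        if (n->mark < mark_min_) return 0;  // stale mark: no write-back
        return n->mark - mark_min_;
      }
      void Set(FakeNode* n, uint32_t state) { n->mark = mark_min_ + state; }
    };

    int main() {
      uint32_t counter = 0;
      FakeNode n;
      FakeMarker a(&counter, 4);
      a.Set(&n, 3);
      assert(a.Get(&n) == 3);
      FakeMarker b(&counter, 4);  // newer marker, fresh window
      assert(b.Get(&n) == 0);     // old mark reads as default state
      return 0;
    }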
 
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index c317fdd..d2bdb8b 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -489,13 +489,14 @@
     bool power_of_two_plus_one = false;
     DisplacementMode displacement_mode = kPositiveDisplacement;
     int scale = 0;
-    if (m.HasIndexInput() && left->OwnedBy(node)) {
+    if (m.HasIndexInput() && left->OwnedByAddressingOperand()) {
       index = m.IndexInput();
       scale = m.scale();
       scale_expression = left;
       power_of_two_plus_one = m.power_of_two_plus_one();
       bool match_found = false;
-      if (right->opcode() == AddMatcher::kSubOpcode && right->OwnedBy(node)) {
+      if (right->opcode() == AddMatcher::kSubOpcode &&
+          right->OwnedByAddressingOperand()) {
         AddMatcher right_matcher(right);
         if (right_matcher.right().HasValue()) {
           // (S + (B - D))
@@ -506,7 +507,8 @@
         }
       }
       if (!match_found) {
-        if (right->opcode() == AddMatcher::kAddOpcode && right->OwnedBy(node)) {
+        if (right->opcode() == AddMatcher::kAddOpcode &&
+            right->OwnedByAddressingOperand()) {
           AddMatcher right_matcher(right);
           if (right_matcher.right().HasValue()) {
             // (S + (B + D))
@@ -526,7 +528,8 @@
       }
     } else {
       bool match_found = false;
-      if (left->opcode() == AddMatcher::kSubOpcode && left->OwnedBy(node)) {
+      if (left->opcode() == AddMatcher::kSubOpcode &&
+          left->OwnedByAddressingOperand()) {
         AddMatcher left_matcher(left);
         Node* left_left = left_matcher.left().node();
         Node* left_right = left_matcher.right().node();
@@ -551,7 +554,8 @@
         }
       }
       if (!match_found) {
-        if (left->opcode() == AddMatcher::kAddOpcode && left->OwnedBy(node)) {
+        if (left->opcode() == AddMatcher::kAddOpcode &&
+            left->OwnedByAddressingOperand()) {
           AddMatcher left_matcher(left);
           Node* left_left = left_matcher.left().node();
           Node* left_right = left_matcher.right().node();
@@ -565,13 +569,19 @@
               displacement = left_right;
               base = right;
             } else if (m.right().HasValue()) {
-              // ((S + B) + D)
-              index = left_matcher.IndexInput();
-              scale = left_matcher.scale();
-              scale_expression = left_left;
-              power_of_two_plus_one = left_matcher.power_of_two_plus_one();
-              base = left_right;
-              displacement = right;
+              if (left->OwnedBy(node)) {
+                // ((S + B) + D)
+                index = left_matcher.IndexInput();
+                scale = left_matcher.scale();
+                scale_expression = left_left;
+                power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+                base = left_right;
+                displacement = right;
+              } else {
+                // (B + D)
+                base = left;
+                displacement = right;
+              }
             } else {
               // (B + B)
               index = left;
@@ -584,10 +594,16 @@
               displacement = left_right;
               base = right;
             } else if (m.right().HasValue()) {
-              // ((B + B) + D)
-              index = left_left;
-              base = left_right;
-              displacement = right;
+              if (left->OwnedBy(node)) {
+                // ((B + B) + D)
+                index = left_left;
+                base = left_right;
+                displacement = right;
+              } else {
+                // (B + D)
+                base = left;
+                displacement = right;
+              }
             } else {
               // (B + B)
               index = left;
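
The thread through these matcher changes: folding a sub-expression into an
addressing mode used to require that it be OwnedBy the address node itself;
OwnedByAddressingOperand apparently relaxes this to ownership by
address-computation uses, and the two new branches handle the genuinely shared
case by peeling off only the outer displacement instead of duplicating the
inner add. In shape form, for node = (left + D) with left = (S + B):

    //   left->OwnedBy(node)   : full fold  -> base B, scaled index S, disp D
    //   left shared elsewhere : outer fold -> base (S + B),           disp D
    // and likewise for the (B + B) form of left.
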
diff --git a/src/compiler/node-properties.cc b/src/compiler/node-properties.cc
index 646dbc2..9243a08 100644
--- a/src/compiler/node-properties.cc
+++ b/src/compiler/node-properties.cc
@@ -2,14 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/node-properties.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
-#include "src/compiler/node-properties.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/compiler/verifier.h"
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -311,6 +314,111 @@
 #endif
 }
 
+// static
+bool NodeProperties::IsSame(Node* a, Node* b) {
+  for (;;) {
+    if (a->opcode() == IrOpcode::kCheckHeapObject) {
+      a = GetValueInput(a, 0);
+      continue;
+    }
+    if (b->opcode() == IrOpcode::kCheckHeapObject) {
+      b = GetValueInput(b, 0);
+      continue;
+    }
+    return a == b;
+  }
+}
+
+// static
+NodeProperties::InferReceiverMapsResult NodeProperties::InferReceiverMaps(
+    Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return) {
+  HeapObjectMatcher m(receiver);
+  if (m.HasValue()) {
+    Handle<Map> receiver_map(m.Value()->map());
+    if (receiver_map->is_stable()) {
+      // The {receiver_map} is only reliable when we install a stability
+      // code dependency.
+      *maps_return = ZoneHandleSet<Map>(receiver_map);
+      return kUnreliableReceiverMaps;
+    }
+  }
+  InferReceiverMapsResult result = kReliableReceiverMaps;
+  while (true) {
+    switch (effect->opcode()) {
+      case IrOpcode::kCheckMaps: {
+        Node* const object = GetValueInput(effect, 0);
+        if (IsSame(receiver, object)) {
+          *maps_return = CheckMapsParametersOf(effect->op()).maps();
+          return result;
+        }
+        break;
+      }
+      case IrOpcode::kJSCreate: {
+        if (IsSame(receiver, effect)) {
+          HeapObjectMatcher mtarget(GetValueInput(effect, 0));
+          HeapObjectMatcher mnewtarget(GetValueInput(effect, 1));
+          if (mtarget.HasValue() && mnewtarget.HasValue()) {
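+            // The {receiver} is allocated with {new.target}'s initial map,
+            // provided that map still belongs to the {target} constructor.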
+            Handle<JSFunction> original_constructor =
+                Handle<JSFunction>::cast(mnewtarget.Value());
+            if (original_constructor->has_initial_map()) {
+              Handle<Map> initial_map(original_constructor->initial_map());
+              if (initial_map->constructor_or_backpointer() ==
+                  *mtarget.Value()) {
+                *maps_return = ZoneHandleSet<Map>(initial_map);
+                return result;
+              }
+            }
+          }
+          // We reached the allocation of the {receiver}.
+          return kNoReceiverMaps;
+        }
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        // We only care about StoreField of maps.
+        Node* const object = GetValueInput(effect, 0);
+        FieldAccess const& access = FieldAccessOf(effect->op());
+        if (access.base_is_tagged == kTaggedBase &&
+            access.offset == HeapObject::kMapOffset) {
+          if (IsSame(receiver, object)) {
+            Node* const value = GetValueInput(effect, 1);
+            HeapObjectMatcher m(value);
+            if (m.HasValue()) {
+              *maps_return = ZoneHandleSet<Map>(Handle<Map>::cast(m.Value()));
+              return result;
+            }
+          }
+          // Without alias analysis we cannot tell whether this
+          // StoreField[map] affects {receiver} or not.
+          result = kUnreliableReceiverMaps;
+        }
+        break;
+      }
+      case IrOpcode::kJSStoreMessage:
+      case IrOpcode::kJSStoreModule:
+      case IrOpcode::kStoreElement:
+      case IrOpcode::kStoreTypedElement: {
+        // These never change the map of objects.
+        break;
+      }
+      default: {
+        DCHECK_EQ(1, effect->op()->EffectOutputCount());
+        if (effect->op()->EffectInputCount() != 1) {
+          // Didn't find any appropriate CheckMaps node.
+          return kNoReceiverMaps;
+        }
+        if (!effect->op()->HasProperty(Operator::kNoWrite)) {
+          // Without alias/escape analysis we cannot tell whether this
+          // {effect} affects {receiver} or not.
+          result = kUnreliableReceiverMaps;
+        }
+        break;
+      }
+    }
+    DCHECK_EQ(1, effect->op()->EffectInputCount());
+    effect = NodeProperties::GetEffectInput(effect);
+  }
+}
 
 // static
 MaybeHandle<Context> NodeProperties::GetSpecializationContext(
@@ -338,6 +446,17 @@
 
 
 // static
+Node* NodeProperties::GetOuterContext(Node* node, size_t* depth) {
+  Node* context = NodeProperties::GetContextInput(node);
+  while (*depth > 0 &&
+         IrOpcode::IsContextChainExtendingOpcode(context->opcode())) {
+    context = NodeProperties::GetContextInput(context);
+    (*depth)--;
+  }
+  return context;
+}
+
+// static
 Type* NodeProperties::GetTypeOrAny(Node* node) {
   return IsTyped(node) ? node->type() : Type::Any();
 }
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
index 2325323..5ed8540 100644
--- a/src/compiler/node-properties.h
+++ b/src/compiler/node-properties.h
@@ -8,6 +8,7 @@
 #include "src/compiler/node.h"
 #include "src/compiler/types.h"
 #include "src/globals.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
@@ -123,6 +124,20 @@
   //  - Switch: [ IfValue, ..., IfDefault ]
   static void CollectControlProjections(Node* node, Node** proj, size_t count);
 
+  // Checks if two nodes are the same, looking past {CheckHeapObject}.
+  static bool IsSame(Node* a, Node* b);
+
+  // Walks up the {effect} chain to find a witness that provides map
+  // information about the {receiver}. It can look past potentially
+  // side-effecting nodes.
+  enum InferReceiverMapsResult {
+    kNoReceiverMaps,         // No receiver maps inferred.
+    kReliableReceiverMaps,   // Receiver maps can be trusted.
+    kUnreliableReceiverMaps  // Receiver maps might have changed (side-effect).
+  };
+  static InferReceiverMapsResult InferReceiverMaps(
+      Node* receiver, Node* effect, ZoneHandleSet<Map>* maps_return);
+
   // ---------------------------------------------------------------------------
   // Context.
 
@@ -132,6 +147,11 @@
   static MaybeHandle<Context> GetSpecializationContext(
       Node* node, MaybeHandle<Context> context = MaybeHandle<Context>());
 
+  // Walk up the context chain from the given {node} until we reduce the {depth}
+  // to 0 or hit a node that does not extend the context chain ({depth} will be
+  // updated accordingly).
+  static Node* GetOuterContext(Node* node, size_t* depth);
+
   // ---------------------------------------------------------------------------
   // Type.
 
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
index f4e7b17..16dc2db 100644
--- a/src/compiler/node.cc
+++ b/src/compiler/node.cc
@@ -296,12 +296,44 @@
   return mask == 3;
 }
 
+bool Node::OwnedByAddressingOperand() const {
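+  // Every use must be a Load, the address part of a Store, or an integer
+  // add that can typically be folded into an addressing mode.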
+  for (Use* use = first_use_; use; use = use->next) {
+    Node* from = use->from();
+    if (from->opcode() != IrOpcode::kLoad &&
+        // If {from} is a Store, it must not use {this} as the stored value.
+        (from->opcode() != IrOpcode::kStore || from->InputAt(2) == this) &&
+        from->opcode() != IrOpcode::kInt32Add &&
+        from->opcode() != IrOpcode::kInt64Add) {
+      return false;
+    }
+  }
+  return true;
+}
 
 void Node::Print() const {
   OFStream os(stdout);
   os << *this << std::endl;
+  for (Node* input : this->inputs()) {
+    os << "  " << *input << std::endl;
+  }
 }
 
+std::ostream& operator<<(std::ostream& os, const Node& n) {
+  os << n.id() << ": " << *n.op();
+  if (n.InputCount() > 0) {
+    os << "(";
+    for (int i = 0; i < n.InputCount(); ++i) {
+      if (i != 0) os << ", ";
+      if (n.InputAt(i)) {
+        os << n.InputAt(i)->id();
+      } else {
+        os << "null";
+      }
+    }
+    os << ")";
+  }
+  return os;
+}
 
 Node::Node(NodeId id, const Operator* op, int inline_count, int inline_capacity)
     : op_(op),
@@ -378,25 +410,6 @@
 }
 #endif
 
-
-std::ostream& operator<<(std::ostream& os, const Node& n) {
-  os << n.id() << ": " << *n.op();
-  if (n.InputCount() > 0) {
-    os << "(";
-    for (int i = 0; i < n.InputCount(); ++i) {
-      if (i != 0) os << ", ";
-      if (n.InputAt(i)) {
-        os << n.InputAt(i)->id();
-      } else {
-        os << "null";
-      }
-    }
-    os << ")";
-  }
-  return os;
-}
-
-
 Node::InputEdges::iterator Node::InputEdges::iterator::operator++(int n) {
   iterator result(*this);
   ++(*this);
@@ -404,9 +417,6 @@
 }
 
 
-bool Node::InputEdges::empty() const { return begin() == end(); }
-
-
 Node::Inputs::const_iterator Node::Inputs::const_iterator::operator++(int n) {
   const_iterator result(*this);
   ++(*this);
@@ -414,9 +424,6 @@
 }
 
 
-bool Node::Inputs::empty() const { return begin() == end(); }
-
-
 Node::UseEdges::iterator Node::UseEdges::iterator::operator++(int n) {
   iterator result(*this);
   ++(*this);
diff --git a/src/compiler/node.h b/src/compiler/node.h
index dc6c5dc..b291af2 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -46,7 +46,7 @@
                    Node* const* inputs, bool has_extensible_inputs);
   static Node* Clone(Zone* zone, NodeId id, const Node* node);
 
-  bool IsDead() const { return InputCount() > 0 && !InputAt(0); }
+  inline bool IsDead() const;
   void Kill();
 
   const Operator* op() const { return op_; }
@@ -109,41 +109,11 @@
   int UseCount() const;
   void ReplaceUses(Node* replace_to);
 
-  class InputEdges final {
-   public:
-    typedef Edge value_type;
+  class InputEdges;
+  inline InputEdges input_edges();
 
-    class iterator;
-    inline iterator begin() const;
-    inline iterator end() const;
-
-    bool empty() const;
-
-    explicit InputEdges(Node* node) : node_(node) {}
-
-   private:
-    Node* node_;
-  };
-
-  InputEdges input_edges() { return InputEdges(this); }
-
-  class V8_EXPORT_PRIVATE Inputs final {
-   public:
-    typedef Node* value_type;
-
-    class const_iterator;
-    inline const_iterator begin() const;
-    inline const_iterator end() const;
-
-    bool empty() const;
-
-    explicit Inputs(Node* node) : node_(node) {}
-
-   private:
-    Node* node_;
-  };
-
-  Inputs inputs() { return Inputs(this); }
+  class Inputs;
+  inline Inputs inputs() const;
 
   class UseEdges final {
    public:
@@ -188,6 +158,10 @@
 
   // Returns true if {owner1} and {owner2} are the only users of {this} node.
   bool OwnedBy(Node const* owner1, Node const* owner2) const;
+
+  // Returns true if addressing-related operands (such as load, store, or
+  // lea) are the only users of {this} node.
+  bool OwnedByAddressingOperand() const;
   void Print() const;
 
  private:
@@ -294,7 +268,7 @@
   void set_type(Type* type) { type_ = type; }
 
   // Only NodeMarkers should manipulate the marks on nodes.
-  Mark mark() { return mark_; }
+  Mark mark() const { return mark_; }
   void set_mark(Mark mark) { mark_ = mark; }
 
   inline bool has_inline_inputs() const {
@@ -345,6 +319,48 @@
   return OpParameter<T>(node->op());
 }
 
+class Node::InputEdges final {
+ public:
+  typedef Edge value_type;
+
+  class iterator;
+  inline iterator begin() const;
+  inline iterator end() const;
+
+  bool empty() const { return count_ == 0; }
+  int count() const { return count_; }
+
+  inline value_type operator[](int index) const;
+
+  InputEdges(Node** input_root, Use* use_root, int count)
+      : input_root_(input_root), use_root_(use_root), count_(count) {}
+
+ private:
+  Node** input_root_;
+  Use* use_root_;
+  int count_;
+};
+
+class V8_EXPORT_PRIVATE Node::Inputs final {
+ public:
+  typedef Node* value_type;
+
+  class const_iterator;
+  inline const_iterator begin() const;
+  inline const_iterator end() const;
+
+  bool empty() const { return count_ == 0; }
+  int count() const { return count_; }
+
+  inline value_type operator[](int index) const;
+
+  explicit Inputs(Node* const* input_root, int count)
+      : input_root_(input_root), count_(count) {}
+
+ private:
+  Node* const* input_root_;
+  int count_;
+};
 
 // An encapsulation for information associated with a single use of a node as
 // an input from another node, allowing access to both the defining node and
@@ -373,6 +389,7 @@
 
  private:
   friend class Node::UseEdges::iterator;
+  friend class Node::InputEdges;
   friend class Node::InputEdges::iterator;
 
   Edge(Node::Use* use, Node** input_ptr) : use_(use), input_ptr_(input_ptr) {
@@ -385,12 +402,37 @@
   Node** input_ptr_;
 };
 
+bool Node::IsDead() const {
+  Node::Inputs inputs = this->inputs();
+  return inputs.count() > 0 && inputs[0] == nullptr;
+}
+
+Node::InputEdges Node::input_edges() {
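+  // Uses are stored in reverse order directly before the node (or its
+  // out-of-line inputs object), so the first Use sits one slot below it.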
+  int inline_count = InlineCountField::decode(bit_field_);
+  if (inline_count != kOutlineMarker) {
+    return InputEdges(inputs_.inline_, reinterpret_cast<Use*>(this) - 1,
+                      inline_count);
+  } else {
+    return InputEdges(inputs_.outline_->inputs_,
+                      reinterpret_cast<Use*>(inputs_.outline_) - 1,
+                      inputs_.outline_->count_);
+  }
+}
+
+Node::Inputs Node::inputs() const {
+  int inline_count = InlineCountField::decode(bit_field_);
+  if (inline_count != kOutlineMarker) {
+    return Inputs(inputs_.inline_, inline_count);
+  } else {
+    return Inputs(inputs_.outline_->inputs_, inputs_.outline_->count_);
+  }
+}
 
 // A forward iterator to visit the edges for the input dependencies of a node.
 class Node::InputEdges::iterator final {
  public:
   typedef std::forward_iterator_tag iterator_category;
-  typedef int difference_type;
+  typedef std::ptrdiff_t difference_type;
   typedef Edge value_type;
   typedef Edge* pointer;
   typedef Edge& reference;
@@ -410,12 +452,23 @@
     return *this;
   }
   iterator operator++(int);
+  iterator& operator+=(difference_type offset) {
+    input_ptr_ += offset;
+    use_ -= offset;
+    return *this;
+  }
+  iterator operator+(difference_type offset) const {
+    return iterator(use_ - offset, input_ptr_ + offset);
+  }
+  difference_type operator-(const iterator& other) const {
+    return input_ptr_ - other.input_ptr_;
+  }
 
  private:
   friend class Node;
 
-  explicit iterator(Node* from, int index = 0)
-      : use_(from->GetUsePtr(index)), input_ptr_(from->GetInputPtr(index)) {}
+  explicit iterator(Use* use, Node** input_ptr)
+      : use_(use), input_ptr_(input_ptr) {}
 
   Use* use_;
   Node** input_ptr_;
@@ -423,57 +476,71 @@
 
 
 Node::InputEdges::iterator Node::InputEdges::begin() const {
-  return Node::InputEdges::iterator(this->node_, 0);
+  return Node::InputEdges::iterator(use_root_, input_root_);
 }
 
 
 Node::InputEdges::iterator Node::InputEdges::end() const {
-  return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
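+  // Input pointers grow upwards while use pointers grow downwards, so the
+  // end iterator advances the two in opposite directions.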
+  return Node::InputEdges::iterator(use_root_ - count_, input_root_ + count_);
 }
 
+Edge Node::InputEdges::operator[](int index) const {
+  return Edge(use_root_ + index, input_root_ + index);
+}
 
 // A forward iterator to visit the inputs of a node.
 class Node::Inputs::const_iterator final {
  public:
   typedef std::forward_iterator_tag iterator_category;
-  typedef int difference_type;
+  typedef std::ptrdiff_t difference_type;
   typedef Node* value_type;
-  typedef Node** pointer;
-  typedef Node*& reference;
+  typedef const value_type* pointer;
+  typedef value_type& reference;
 
-  const_iterator(const const_iterator& other) : iter_(other.iter_) {}
+  const_iterator(const const_iterator& other) : input_ptr_(other.input_ptr_) {}
 
-  Node* operator*() const { return (*iter_).to(); }
+  Node* operator*() const { return *input_ptr_; }
   bool operator==(const const_iterator& other) const {
-    return iter_ == other.iter_;
+    return input_ptr_ == other.input_ptr_;
   }
   bool operator!=(const const_iterator& other) const {
     return !(*this == other);
   }
   const_iterator& operator++() {
-    ++iter_;
+    ++input_ptr_;
     return *this;
   }
   const_iterator operator++(int);
+  const_iterator& operator+=(difference_type offset) {
+    input_ptr_ += offset;
+    return *this;
+  }
+  const_iterator operator+(difference_type offset) const {
+    return const_iterator(input_ptr_ + offset);
+  }
+  difference_type operator-(const const_iterator& other) const {
+    return input_ptr_ - other.input_ptr_;
+  }
 
  private:
   friend class Node::Inputs;
 
-  const_iterator(Node* node, int index) : iter_(node, index) {}
+  explicit const_iterator(Node* const* input_ptr) : input_ptr_(input_ptr) {}
 
-  Node::InputEdges::iterator iter_;
+  Node* const* input_ptr_;
 };
 
 
 Node::Inputs::const_iterator Node::Inputs::begin() const {
-  return const_iterator(this->node_, 0);
+  return const_iterator(input_root_);
 }
 
 
 Node::Inputs::const_iterator Node::Inputs::end() const {
-  return const_iterator(this->node_, this->node_->InputCount());
+  return const_iterator(input_root_ + count_);
 }
 
+Node* Node::Inputs::operator[](int index) const { return input_root_[index]; }
 
 // A forward iterator to visit the uses edges of a node.
 class Node::UseEdges::iterator final {
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index fdbe001..b50754c 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -25,6 +25,8 @@
   V(Deoptimize)            \
   V(DeoptimizeIf)          \
   V(DeoptimizeUnless)      \
+  V(TrapIf)                \
+  V(TrapUnless)            \
   V(Return)                \
   V(TailCall)              \
   V(Terminate)             \
@@ -57,6 +59,7 @@
   V(FrameState)           \
   V(StateValues)          \
   V(TypedStateValues)     \
+  V(ArgumentsObjectState) \
   V(ObjectState)          \
   V(TypedObjectState)     \
   V(Call)                 \
@@ -104,7 +107,9 @@
 #define JS_SIMPLE_BINOP_LIST(V) \
   JS_COMPARE_BINOP_LIST(V)      \
   JS_BITWISE_BINOP_LIST(V)      \
-  JS_ARITH_BINOP_LIST(V)
+  JS_ARITH_BINOP_LIST(V)        \
+  V(JSInstanceOf)               \
+  V(JSOrdinaryHasInstance)
 
 #define JS_CONVERSION_UNOP_LIST(V) \
   V(JSToBoolean)                   \
@@ -116,32 +121,34 @@
   V(JSToString)
 
 #define JS_OTHER_UNOP_LIST(V) \
+  V(JSClassOf)                \
   V(JSTypeOf)
 
 #define JS_SIMPLE_UNOP_LIST(V) \
   JS_CONVERSION_UNOP_LIST(V)   \
   JS_OTHER_UNOP_LIST(V)
 
-#define JS_OBJECT_OP_LIST(V)  \
-  V(JSCreate)                 \
-  V(JSCreateArguments)        \
-  V(JSCreateArray)            \
-  V(JSCreateClosure)          \
-  V(JSCreateIterResultObject) \
-  V(JSCreateKeyValueArray)    \
-  V(JSCreateLiteralArray)     \
-  V(JSCreateLiteralObject)    \
-  V(JSCreateLiteralRegExp)    \
-  V(JSLoadProperty)           \
-  V(JSLoadNamed)              \
-  V(JSLoadGlobal)             \
-  V(JSStoreProperty)          \
-  V(JSStoreNamed)             \
-  V(JSStoreGlobal)            \
-  V(JSDeleteProperty)         \
-  V(JSHasProperty)            \
-  V(JSInstanceOf)             \
-  V(JSOrdinaryHasInstance)
+#define JS_OBJECT_OP_LIST(V)      \
+  V(JSCreate)                     \
+  V(JSCreateArguments)            \
+  V(JSCreateArray)                \
+  V(JSCreateClosure)              \
+  V(JSCreateIterResultObject)     \
+  V(JSCreateKeyValueArray)        \
+  V(JSCreateLiteralArray)         \
+  V(JSCreateLiteralObject)        \
+  V(JSCreateLiteralRegExp)        \
+  V(JSLoadProperty)               \
+  V(JSLoadNamed)                  \
+  V(JSLoadGlobal)                 \
+  V(JSStoreProperty)              \
+  V(JSStoreNamed)                 \
+  V(JSStoreNamedOwn)              \
+  V(JSStoreGlobal)                \
+  V(JSStoreDataPropertyInLiteral) \
+  V(JSDeleteProperty)             \
+  V(JSHasProperty)                \
+  V(JSGetSuperConstructor)
 
 #define JS_CONTEXT_OP_LIST(V) \
   V(JSLoadContext)            \
@@ -153,8 +160,11 @@
   V(JSCreateScriptContext)
 
 #define JS_OTHER_OP_LIST(V)         \
-  V(JSCallConstruct)                \
-  V(JSCallFunction)                 \
+  V(JSConstruct)                    \
+  V(JSConstructWithSpread)          \
+  V(JSCallForwardVarargs)           \
+  V(JSCall)                         \
+  V(JSCallWithSpread)               \
   V(JSCallRuntime)                  \
   V(JSConvertReceiver)              \
   V(JSForInNext)                    \
@@ -166,7 +176,8 @@
   V(JSGeneratorStore)               \
   V(JSGeneratorRestoreContinuation) \
   V(JSGeneratorRestoreRegister)     \
-  V(JSStackCheck)
+  V(JSStackCheck)                   \
+  V(JSDebugger)
 
 #define JS_OP_LIST(V)     \
   JS_SIMPLE_BINOP_LIST(V) \
@@ -181,6 +192,7 @@
   V(ChangeTaggedToInt32)             \
   V(ChangeTaggedToUint32)            \
   V(ChangeTaggedToFloat64)           \
+  V(ChangeTaggedToTaggedSigned)      \
   V(ChangeInt31ToTaggedSigned)       \
   V(ChangeInt32ToTagged)             \
   V(ChangeUint32ToTagged)            \
@@ -294,13 +306,17 @@
   V(PlainPrimitiveToWord32)         \
   V(PlainPrimitiveToFloat64)        \
   V(BooleanNot)                     \
+  V(StringCharAt)                   \
   V(StringCharCodeAt)               \
   V(StringFromCharCode)             \
   V(StringFromCodePoint)            \
+  V(StringIndexOf)                  \
   V(CheckBounds)                    \
   V(CheckIf)                        \
   V(CheckMaps)                      \
   V(CheckNumber)                    \
+  V(CheckInternalizedString)        \
+  V(CheckReceiver)                  \
   V(CheckString)                    \
   V(CheckSmi)                       \
   V(CheckHeapObject)                \
@@ -316,12 +332,15 @@
   V(StoreBuffer)                    \
   V(StoreElement)                   \
   V(StoreTypedElement)              \
-  V(ObjectIsCallable)               \
+  V(ObjectIsDetectableCallable)     \
+  V(ObjectIsNonCallable)            \
   V(ObjectIsNumber)                 \
   V(ObjectIsReceiver)               \
   V(ObjectIsSmi)                    \
   V(ObjectIsString)                 \
   V(ObjectIsUndetectable)           \
+  V(NewRestParameterElements)       \
+  V(NewUnmappedArgumentsElements)   \
   V(ArrayBufferWasNeutered)         \
   V(EnsureWritableFastElements)     \
   V(MaybeGrowFastElements)          \
@@ -527,6 +546,7 @@
   V(Word32PairShr)              \
   V(Word32PairSar)              \
   V(ProtectedLoad)              \
+  V(ProtectedStore)             \
   V(AtomicLoad)                 \
   V(AtomicStore)                \
   V(UnsafePointerAdd)
@@ -553,9 +573,6 @@
   V(Float32x4LessThanOrEqual)               \
   V(Float32x4GreaterThan)                   \
   V(Float32x4GreaterThanOrEqual)            \
-  V(Float32x4Select)                        \
-  V(Float32x4Swizzle)                       \
-  V(Float32x4Shuffle)                       \
   V(Float32x4FromInt32x4)                   \
   V(Float32x4FromUint32x4)                  \
   V(CreateInt32x4)                          \
@@ -574,9 +591,6 @@
   V(Int32x4LessThanOrEqual)                 \
   V(Int32x4GreaterThan)                     \
   V(Int32x4GreaterThanOrEqual)              \
-  V(Int32x4Select)                          \
-  V(Int32x4Swizzle)                         \
-  V(Int32x4Shuffle)                         \
   V(Int32x4FromFloat32x4)                   \
   V(Uint32x4Min)                            \
   V(Uint32x4Max)                            \
@@ -587,16 +601,10 @@
   V(Uint32x4GreaterThan)                    \
   V(Uint32x4GreaterThanOrEqual)             \
   V(Uint32x4FromFloat32x4)                  \
-  V(CreateBool32x4)                         \
-  V(Bool32x4ReplaceLane)                    \
   V(Bool32x4And)                            \
   V(Bool32x4Or)                             \
   V(Bool32x4Xor)                            \
   V(Bool32x4Not)                            \
-  V(Bool32x4Swizzle)                        \
-  V(Bool32x4Shuffle)                        \
-  V(Bool32x4Equal)                          \
-  V(Bool32x4NotEqual)                       \
   V(CreateInt16x8)                          \
   V(Int16x8ReplaceLane)                     \
   V(Int16x8Neg)                             \
@@ -615,9 +623,6 @@
   V(Int16x8LessThanOrEqual)                 \
   V(Int16x8GreaterThan)                     \
   V(Int16x8GreaterThanOrEqual)              \
-  V(Int16x8Select)                          \
-  V(Int16x8Swizzle)                         \
-  V(Int16x8Shuffle)                         \
   V(Uint16x8AddSaturate)                    \
   V(Uint16x8SubSaturate)                    \
   V(Uint16x8Min)                            \
@@ -628,16 +633,10 @@
   V(Uint16x8LessThanOrEqual)                \
   V(Uint16x8GreaterThan)                    \
   V(Uint16x8GreaterThanOrEqual)             \
-  V(CreateBool16x8)                         \
-  V(Bool16x8ReplaceLane)                    \
   V(Bool16x8And)                            \
   V(Bool16x8Or)                             \
   V(Bool16x8Xor)                            \
   V(Bool16x8Not)                            \
-  V(Bool16x8Swizzle)                        \
-  V(Bool16x8Shuffle)                        \
-  V(Bool16x8Equal)                          \
-  V(Bool16x8NotEqual)                       \
   V(CreateInt8x16)                          \
   V(Int8x16ReplaceLane)                     \
   V(Int8x16Neg)                             \
@@ -656,9 +655,6 @@
   V(Int8x16LessThanOrEqual)                 \
   V(Int8x16GreaterThan)                     \
   V(Int8x16GreaterThanOrEqual)              \
-  V(Int8x16Select)                          \
-  V(Int8x16Swizzle)                         \
-  V(Int8x16Shuffle)                         \
   V(Uint8x16AddSaturate)                    \
   V(Uint8x16SubSaturate)                    \
   V(Uint8x16Min)                            \
@@ -669,16 +665,23 @@
   V(Uint8x16LessThanOrEqual)                \
   V(Uint8x16GreaterThan)                    \
   V(Uint8x16GreaterThanOrEqual)             \
-  V(CreateBool8x16)                         \
-  V(Bool8x16ReplaceLane)                    \
   V(Bool8x16And)                            \
   V(Bool8x16Or)                             \
   V(Bool8x16Xor)                            \
   V(Bool8x16Not)                            \
-  V(Bool8x16Swizzle)                        \
-  V(Bool8x16Shuffle)                        \
-  V(Bool8x16Equal)                          \
-  V(Bool8x16NotEqual)
+  V(Simd128And)                             \
+  V(Simd128Or)                              \
+  V(Simd128Xor)                             \
+  V(Simd128Not)                             \
+  V(Simd32x4Select)                         \
+  V(Simd32x4Swizzle)                        \
+  V(Simd32x4Shuffle)                        \
+  V(Simd16x8Select)                         \
+  V(Simd16x8Swizzle)                        \
+  V(Simd16x8Shuffle)                        \
+  V(Simd8x16Select)                         \
+  V(Simd8x16Swizzle)                        \
+  V(Simd8x16Shuffle)
 
 #define MACHINE_SIMD_RETURN_NUM_OP_LIST(V) \
   V(Float32x4ExtractLane)                  \
@@ -687,13 +690,10 @@
   V(Int8x16ExtractLane)
 
 #define MACHINE_SIMD_RETURN_BOOL_OP_LIST(V) \
-  V(Bool32x4ExtractLane)                    \
   V(Bool32x4AnyTrue)                        \
   V(Bool32x4AllTrue)                        \
-  V(Bool16x8ExtractLane)                    \
   V(Bool16x8AnyTrue)                        \
   V(Bool16x8AllTrue)                        \
-  V(Bool8x16ExtractLane)                    \
   V(Bool8x16AnyTrue)                        \
   V(Bool8x16AllTrue)
 
@@ -705,11 +705,7 @@
   V(Simd128Store)                       \
   V(Simd128Store1)                      \
   V(Simd128Store2)                      \
-  V(Simd128Store3)                      \
-  V(Simd128And)                         \
-  V(Simd128Or)                          \
-  V(Simd128Xor)                         \
-  V(Simd128Not)
+  V(Simd128Store3)
 
 #define MACHINE_SIMD_OP_LIST(V)       \
   MACHINE_SIMD_RETURN_SIMD_OP_LIST(V) \
@@ -762,7 +758,7 @@
 
   // Returns true if opcode for JavaScript operator.
   static bool IsJsOpcode(Value value) {
-    return kJSEqual <= value && value <= kJSStackCheck;
+    return kJSEqual <= value && value <= kJSDebugger;
   }
 
   // Returns true if opcode for constant operator.
@@ -784,7 +780,7 @@
 
   // Returns true if opcode can be inlined.
   static bool IsInlineeOpcode(Value value) {
-    return value == kJSCallConstruct || value == kJSCallFunction;
+    return value == kJSConstruct || value == kJSCall;
   }
 
   // Returns true if opcode for comparison operator.
@@ -793,6 +789,10 @@
            (kNumberEqual <= value && value <= kStringLessThanOrEqual) ||
            (kWord32Equal <= value && value <= kFloat64LessThanOrEqual);
   }
+
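+  // Returns true if opcode creates a context that chains to an outer one.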
+  static bool IsContextChainExtendingOpcode(Value value) {
+    return kJSCreateFunctionContext <= value && value <= kJSCreateScriptContext;
+  }
 };
 
 V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IrOpcode::Value);
diff --git a/src/compiler/operation-typer.cc b/src/compiler/operation-typer.cc
index 9198f4b..dfd4c4b 100644
--- a/src/compiler/operation-typer.cc
+++ b/src/compiler/operation-typer.cc
@@ -366,8 +366,9 @@
 Type* OperationTyper::NumberFloor(Type* type) {
   DCHECK(type->Is(Type::Number()));
   if (type->Is(cache_.kIntegerOrMinusZeroOrNaN)) return type;
-  // TODO(bmeurer): We could infer a more precise type here.
-  return cache_.kIntegerOrMinusZeroOrNaN;
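+  // Floor maps -0 to -0 and NaN to NaN; everything else becomes an integer.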
+  type = Type::Intersect(type, Type::MinusZeroOrNaN(), zone());
+  type = Type::Union(type, cache_.kInteger, zone());
+  return type;
 }
 
 Type* OperationTyper::NumberFround(Type* type) {
@@ -624,12 +625,19 @@
   }
 
   if (lhs->Is(Type::NaN()) || rhs->Is(Type::NaN())) return Type::NaN();
-  // Division is tricky, so all we do is try ruling out nan.
+  // Division is tricky, so all we do is try ruling out -0 and NaN.
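+  // -0 is only ruled out when both operands are positive integers (or NaN).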
+  bool maybe_minuszero = !lhs->Is(cache_.kPositiveIntegerOrNaN) ||
+                         !rhs->Is(cache_.kPositiveIntegerOrNaN);
   bool maybe_nan =
       lhs->Maybe(Type::NaN()) || rhs->Maybe(cache_.kZeroish) ||
       ((lhs->Min() == -V8_INFINITY || lhs->Max() == +V8_INFINITY) &&
        (rhs->Min() == -V8_INFINITY || rhs->Max() == +V8_INFINITY));
-  return maybe_nan ? Type::Number() : Type::OrderedNumber();
+
+  // Take into account the -0 and NaN information computed earlier.
+  Type* type = Type::PlainNumber();
+  if (maybe_minuszero) type = Type::Union(type, Type::MinusZero(), zone());
+  if (maybe_nan) type = Type::Union(type, Type::NaN(), zone());
+  return type;
 }
 
 Type* OperationTyper::NumberModulus(Type* lhs, Type* rhs) {
@@ -796,8 +804,35 @@
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
-  // TODO(turbofan): Infer a better type here.
-  return Type::Signed32();
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
+
+  lhs = NumberToInt32(lhs);
+  rhs = NumberToUint32(rhs);
+
+  int32_t min_lhs = lhs->Min();
+  int32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // {rhs} may exceed 31; the shift count is masked to five bits, so any
+    // amount in [0, 31] is possible.
+    max_rhs = 31;
+    min_rhs = 0;
+  }
+
+  if (max_lhs > (kMaxInt >> max_rhs) || min_lhs < (kMinInt >> max_rhs)) {
+    // Overflow is possible; fall back to the full Signed32 range.
+    return Type::Signed32();
+  }
+
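+  // Overflow was excluded above, so the extremes of the result come from
+  // shifting the bounds by the smallest and largest shift amounts.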
+  double min =
+      std::min(static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << min_rhs),
+               static_cast<int32_t>(static_cast<uint32_t>(min_lhs) << max_rhs));
+  double max =
+      std::max(static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << min_rhs),
+               static_cast<int32_t>(static_cast<uint32_t>(max_lhs) << max_rhs));
+
+  if (max == kMaxInt && min == kMinInt) return Type::Signed32();
+  return Type::Range(min, max, zone());
 }
 
 Type* OperationTyper::NumberShiftRight(Type* lhs, Type* rhs) {
@@ -809,33 +844,18 @@
   lhs = NumberToInt32(lhs);
   rhs = NumberToUint32(rhs);
 
-  double min = kMinInt;
-  double max = kMaxInt;
-  if (lhs->Min() >= 0) {
-    // Right-shifting a non-negative value cannot make it negative, nor larger.
-    min = std::max(min, 0.0);
-    max = std::min(max, lhs->Max());
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      max = static_cast<int>(max) >> static_cast<int>(rhs->Min());
-    }
+  int32_t min_lhs = lhs->Min();
+  int32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // {rhs} may exceed 31; the shift count is masked to five bits, so any
+    // amount in [0, 31] is possible.
+    max_rhs = 31;
+    min_rhs = 0;
   }
-  if (lhs->Max() < 0) {
-    // Right-shifting a negative value cannot make it non-negative, nor smaller.
-    min = std::max(min, lhs->Min());
-    max = std::min(max, -1.0);
-    if (rhs->Min() > 0 && rhs->Max() <= 31) {
-      min = static_cast<int>(min) >> static_cast<int>(rhs->Min());
-    }
-  }
-  if (rhs->Min() > 0 && rhs->Max() <= 31) {
-    // Right-shifting by a positive value yields a small integer value.
-    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
-    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
-    min = std::max(min, shift_min);
-    max = std::min(max, shift_max);
-  }
-  // TODO(jarin) Ideally, the following micro-optimization should be performed
-  // by the type constructor.
+  double min = std::min(min_lhs >> min_rhs, min_lhs >> max_rhs);
+  double max = std::max(max_lhs >> min_rhs, max_lhs >> max_rhs);
+
   if (max == kMaxInt && min == kMinInt) return Type::Signed32();
   return Type::Range(min, max, zone());
 }
@@ -844,12 +864,29 @@
   DCHECK(lhs->Is(Type::Number()));
   DCHECK(rhs->Is(Type::Number()));
 
-  if (!lhs->IsInhabited()) return Type::None();
+  if (!lhs->IsInhabited() || !rhs->IsInhabited()) return Type::None();
 
   lhs = NumberToUint32(lhs);
+  rhs = NumberToUint32(rhs);
 
-  // Logical right-shifting any value cannot make it larger.
-  return Type::Range(0.0, lhs->Max(), zone());
+  uint32_t min_lhs = lhs->Min();
+  uint32_t max_lhs = lhs->Max();
+  uint32_t min_rhs = rhs->Min();
+  uint32_t max_rhs = rhs->Max();
+  if (max_rhs > 31) {
+    // {rhs} may exceed 31; the shift count is masked to five bits, so any
+    // amount in [0, 31] is possible.
+    max_rhs = 31;
+    min_rhs = 0;
+  }
+
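+  // For unsigned values a larger shift yields a smaller result, so the
+  // bounds pair up crosswise.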
+  double min = min_lhs >> max_rhs;
+  double max = max_lhs >> min_rhs;
+  DCHECK_LE(0, min);
+  DCHECK_LE(max, kMaxUInt32);
+
+  if (min == 0 && max == kMaxInt) return Type::Unsigned31();
+  if (min == 0 && max == kMaxUInt32) return Type::Unsigned32();
+  return Type::Range(min, max, zone());
 }
 
 Type* OperationTyper::NumberAtan2(Type* lhs, Type* rhs) {
diff --git a/src/compiler/operator-properties.cc b/src/compiler/operator-properties.cc
index 0a9e644..0d488d8 100644
--- a/src/compiler/operator-properties.cc
+++ b/src/compiler/operator-properties.cc
@@ -78,6 +78,8 @@
     case IrOpcode::kJSStoreProperty:
     case IrOpcode::kJSLoadGlobal:
     case IrOpcode::kJSStoreGlobal:
+    case IrOpcode::kJSStoreNamedOwn:
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
     case IrOpcode::kJSDeleteProperty:
 
     // Context operations
@@ -92,14 +94,18 @@
     case IrOpcode::kJSToString:
 
     // Call operations
-    case IrOpcode::kJSCallConstruct:
-    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
+    case IrOpcode::kJSCallForwardVarargs:
+    case IrOpcode::kJSCall:
+    case IrOpcode::kJSCallWithSpread:
 
     // Misc operations
-    case IrOpcode::kJSConvertReceiver:
     case IrOpcode::kJSForInNext:
     case IrOpcode::kJSForInPrepare:
     case IrOpcode::kJSStackCheck:
+    case IrOpcode::kJSDebugger:
+    case IrOpcode::kJSGetSuperConstructor:
       return true;
 
     default:
diff --git a/src/compiler/osr.cc b/src/compiler/osr.cc
index a2dc430..687424b 100644
--- a/src/compiler/osr.cc
+++ b/src/compiler/osr.cc
@@ -268,28 +268,7 @@
     }
   }
 
-  OsrGuardType guard_type = OsrGuardType::kAny;
-  // Find the phi that uses the OsrGuard node and get the type from
-  // there. Skip the search if the OsrGuard does not have value use
-  // (i.e., if there is other use beyond the effect use).
-  if (OsrGuardTypeOf(osr_guard->op()) == OsrGuardType::kUninitialized &&
-      osr_guard->UseCount() > 1) {
-    Type* type = nullptr;
-    for (Node* use : osr_guard->uses()) {
-      if (use->opcode() == IrOpcode::kPhi) {
-        if (NodeProperties::GetControlInput(use) != loop) continue;
-        CHECK_NULL(type);
-        type = NodeProperties::GetType(use);
-      }
-    }
-    CHECK_NOT_NULL(type);
-
-    if (type->Is(Type::SignedSmall())) {
-      guard_type = OsrGuardType::kSignedSmall;
-    }
-  }
-
-  NodeProperties::ChangeOp(osr_guard, common->OsrGuard(guard_type));
+  NodeProperties::ChangeOp(osr_guard, common->OsrGuard(OsrGuardType::kAny));
 }
 
 }  // namespace
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 2614155..330b096 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -37,7 +37,6 @@
 #include "src/compiler/js-create-lowering.h"
 #include "src/compiler/js-frame-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
-#include "src/compiler/js-global-object-specialization.h"
 #include "src/compiler/js-inlining-heuristic.h"
 #include "src/compiler/js-intrinsic-lowering.h"
 #include "src/compiler/js-native-context-specialization.h"
@@ -65,7 +64,6 @@
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/store-store-elimination.h"
 #include "src/compiler/tail-call-optimization.h"
-#include "src/compiler/type-hint-analyzer.h"
 #include "src/compiler/typed-optimization.h"
 #include "src/compiler/typer.h"
 #include "src/compiler/value-numbering-reducer.h"
@@ -75,6 +73,7 @@
 #include "src/ostreams.h"
 #include "src/parsing/parse-info.h"
 #include "src/register-configuration.h"
+#include "src/trap-handler/trap-handler.h"
 #include "src/type-info.h"
 #include "src/utils.h"
 
@@ -111,11 +110,37 @@
     javascript_ = new (graph_zone_) JSOperatorBuilder(graph_zone_);
     jsgraph_ = new (graph_zone_)
         JSGraph(isolate_, graph_, common_, javascript_, simplified_, machine_);
+    is_asm_ = info->shared_info()->asm_function();
   }
 
   // For WASM compile entry point.
+  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, JSGraph* jsgraph,
+               SourcePositionTable* source_positions,
+               ZoneVector<trap_handler::ProtectedInstructionData>*
+                   protected_instructions)
+      : isolate_(info->isolate()),
+        info_(info),
+        debug_name_(info_->GetDebugName()),
+        zone_stats_(zone_stats),
+        graph_zone_scope_(zone_stats_, ZONE_NAME),
+        graph_(jsgraph->graph()),
+        source_positions_(source_positions),
+        machine_(jsgraph->machine()),
+        common_(jsgraph->common()),
+        javascript_(jsgraph->javascript()),
+        jsgraph_(jsgraph),
+        instruction_zone_scope_(zone_stats_, ZONE_NAME),
+        instruction_zone_(instruction_zone_scope_.zone()),
+        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
+        register_allocation_zone_(register_allocation_zone_scope_.zone()),
+        protected_instructions_(protected_instructions) {
+    is_asm_ =
+        info->has_shared_info() ? info->shared_info()->asm_function() : false;
+  }
+
+  // For machine graph testing entry point.
   PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
-               SourcePositionTable* source_positions)
+               Schedule* schedule, SourcePositionTable* source_positions)
       : isolate_(info->isolate()),
         info_(info),
         debug_name_(info_->GetDebugName()),
@@ -123,27 +148,13 @@
         graph_zone_scope_(zone_stats_, ZONE_NAME),
         graph_(graph),
         source_positions_(source_positions),
-        instruction_zone_scope_(zone_stats_, ZONE_NAME),
-        instruction_zone_(instruction_zone_scope_.zone()),
-        register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
-  // For machine graph testing entry point.
-  PipelineData(ZoneStats* zone_stats, CompilationInfo* info, Graph* graph,
-               Schedule* schedule)
-      : isolate_(info->isolate()),
-        info_(info),
-        debug_name_(info_->GetDebugName()),
-        zone_stats_(zone_stats),
-        graph_zone_scope_(zone_stats_, ZONE_NAME),
-        graph_(graph),
-        source_positions_(new (info->zone()) SourcePositionTable(graph_)),
         schedule_(schedule),
         instruction_zone_scope_(zone_stats_, ZONE_NAME),
         instruction_zone_(instruction_zone_scope_.zone()),
         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
-
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+    is_asm_ = false;
+  }
   // For register allocation testing entry point.
   PipelineData(ZoneStats* zone_stats, CompilationInfo* info,
                InstructionSequence* sequence)
@@ -156,7 +167,10 @@
         instruction_zone_(sequence->zone()),
         sequence_(sequence),
         register_allocation_zone_scope_(zone_stats_, ZONE_NAME),
-        register_allocation_zone_(register_allocation_zone_scope_.zone()) {}
+        register_allocation_zone_(register_allocation_zone_scope_.zone()) {
+    is_asm_ =
+        info->has_shared_info() ? info->shared_info()->asm_function() : false;
+  }
 
   ~PipelineData() {
     DeleteRegisterAllocationZone();
@@ -170,6 +184,11 @@
   PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
   bool compilation_failed() const { return compilation_failed_; }
   void set_compilation_failed() { compilation_failed_ = true; }
+
+  bool is_asm() const { return is_asm_; }
+  bool verify_graph() const { return verify_graph_; }
+  void set_verify_graph(bool value) { verify_graph_ = value; }
+
   Handle<Code> code() { return code_; }
   void set_code(Handle<Code> code) {
     DCHECK(code_.is_null());
@@ -199,12 +218,6 @@
     loop_assignment_ = loop_assignment;
   }
 
-  TypeHintAnalysis* type_hint_analysis() const { return type_hint_analysis_; }
-  void set_type_hint_analysis(TypeHintAnalysis* type_hint_analysis) {
-    DCHECK_NULL(type_hint_analysis_);
-    type_hint_analysis_ = type_hint_analysis;
-  }
-
   Schedule* schedule() const { return schedule_; }
   void set_schedule(Schedule* schedule) {
     DCHECK(!schedule_);
@@ -233,6 +246,11 @@
     source_position_output_ = source_position_output;
   }
 
+  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions()
+      const {
+    return protected_instructions_;
+  }
+
   void DeleteGraphZone() {
     if (graph_zone_ == nullptr) return;
     graph_zone_scope_.Destroy();
@@ -240,7 +258,6 @@
     graph_ = nullptr;
     source_positions_ = nullptr;
     loop_assignment_ = nullptr;
-    type_hint_analysis_ = nullptr;
     simplified_ = nullptr;
     machine_ = nullptr;
     common_ = nullptr;
@@ -293,7 +310,7 @@
     DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
-                               sequence(), debug_name_.get());
+                               sequence(), debug_name());
   }
 
   void BeginPhaseKind(const char* phase_kind_name) {
@@ -308,6 +325,8 @@
     }
   }
 
+  const char* debug_name() const { return debug_name_.get(); }
+
  private:
   Isolate* const isolate_;
   CompilationInfo* const info_;
@@ -316,6 +335,8 @@
   ZoneStats* const zone_stats_;
   PipelineStatistics* pipeline_statistics_ = nullptr;
   bool compilation_failed_ = false;
+  bool verify_graph_ = false;
+  bool is_asm_ = false;
   Handle<Code> code_ = Handle<Code>::null();
 
   // All objects in the following group of fields are allocated in graph_zone_.
@@ -325,7 +346,6 @@
   Graph* graph_ = nullptr;
   SourcePositionTable* source_positions_ = nullptr;
   LoopAssignmentAnalysis* loop_assignment_ = nullptr;
-  TypeHintAnalysis* type_hint_analysis_ = nullptr;
   SimplifiedOperatorBuilder* simplified_ = nullptr;
   MachineOperatorBuilder* machine_ = nullptr;
   CommonOperatorBuilder* common_ = nullptr;
@@ -355,6 +375,9 @@
   // Source position output for --trace-turbo.
   std::string source_position_output_;
 
+  ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions_ =
+      nullptr;
+
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
 
@@ -522,14 +545,13 @@
 
 class PipelineCompilationJob final : public CompilationJob {
  public:
-  PipelineCompilationJob(Isolate* isolate, Handle<JSFunction> function)
+  PipelineCompilationJob(ParseInfo* parse_info, Handle<JSFunction> function)
       // Note that the CompilationInfo is not initialized at the time we pass it
       // to the CompilationJob constructor, but it is not dereferenced there.
-      : CompilationJob(isolate, &info_, "TurboFan"),
-        zone_(isolate->allocator(), ZONE_NAME),
-        zone_stats_(isolate->allocator()),
-        parse_info_(&zone_, handle(function->shared())),
-        info_(&parse_info_, function),
+      : CompilationJob(parse_info->isolate(), &info_, "TurboFan"),
+        parse_info_(parse_info),
+        zone_stats_(parse_info->isolate()->allocator()),
+        info_(parse_info_.get()->zone(), parse_info_.get(), function),
         pipeline_statistics_(CreatePipelineStatistics(info(), &zone_stats_)),
         data_(&zone_stats_, info(), pipeline_statistics_.get()),
         pipeline_(&data_),
@@ -541,9 +563,8 @@
   Status FinalizeJobImpl() final;
 
  private:
-  Zone zone_;
+  std::unique_ptr<ParseInfo> parse_info_;
   ZoneStats zone_stats_;
-  ParseInfo parse_info_;
   CompilationInfo info_;
   std::unique_ptr<PipelineStatistics> pipeline_statistics_;
   PipelineData data_;
@@ -555,30 +576,37 @@
 
 PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl() {
   if (info()->shared_info()->asm_function()) {
-    if (info()->osr_frame()) info()->MarkAsFrameSpecializing();
+    if (info()->osr_frame() && !info()->is_optimizing_from_bytecode()) {
+      info()->MarkAsFrameSpecializing();
+    }
     info()->MarkAsFunctionContextSpecializing();
   } else {
     if (!FLAG_always_opt) {
       info()->MarkAsBailoutOnUninitialized();
     }
-    if (FLAG_turbo_inlining) {
-      info()->MarkAsInliningEnabled();
+    if (FLAG_turbo_loop_peeling) {
+      info()->MarkAsLoopPeelingEnabled();
     }
   }
-  if (!info()->shared_info()->asm_function() || FLAG_turbo_asm_deoptimization) {
+  if (info()->is_optimizing_from_bytecode() ||
+      !info()->shared_info()->asm_function()) {
     info()->MarkAsDeoptimizationEnabled();
     if (FLAG_inline_accessors) {
       info()->MarkAsAccessorInliningEnabled();
     }
+    if (info()->closure()->feedback_vector_cell()->map() ==
+        isolate()->heap()->one_closure_cell_map()) {
+      info()->MarkAsFunctionContextSpecializing();
+    }
   }
   if (!info()->is_optimizing_from_bytecode()) {
-    if (info()->is_deoptimization_enabled() && FLAG_turbo_type_feedback) {
-      info()->MarkAsTypeFeedbackEnabled();
-    }
     if (!Compiler::EnsureDeoptimizationSupport(info())) return FAILED;
+  } else if (FLAG_turbo_inlining) {
+    info()->MarkAsInliningEnabled();
   }
 
-  linkage_ = new (&zone_) Linkage(Linkage::ComputeIncoming(&zone_, info()));
+  linkage_ = new (info()->zone())
+      Linkage(Linkage::ComputeIncoming(info()->zone(), info()));
 
   if (!pipeline_.CreateGraph()) {
     if (isolate()->has_pending_exception()) return FAILED;  // Stack overflowed.
@@ -612,15 +640,18 @@
 
 class PipelineWasmCompilationJob final : public CompilationJob {
  public:
-  explicit PipelineWasmCompilationJob(CompilationInfo* info, Graph* graph,
-                                      CallDescriptor* descriptor,
-                                      SourcePositionTable* source_positions)
+  explicit PipelineWasmCompilationJob(
+      CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+      SourcePositionTable* source_positions,
+      ZoneVector<trap_handler::ProtectedInstructionData>* protected_insts,
+      bool allow_signalling_nan)
       : CompilationJob(info->isolate(), info, "TurboFan",
                        State::kReadyToExecute),
         zone_stats_(info->isolate()->allocator()),
-        data_(&zone_stats_, info, graph, source_positions),
+        data_(&zone_stats_, info, jsgraph, source_positions, protected_insts),
         pipeline_(&data_),
-        linkage_(descriptor) {}
+        linkage_(descriptor),
+        allow_signalling_nan_(allow_signalling_nan) {}
 
  protected:
   Status PrepareJobImpl() final;
@@ -632,6 +663,7 @@
   PipelineData data_;
   PipelineImpl pipeline_;
   Linkage linkage_;
+  bool allow_signalling_nan_;
 };
 
 PipelineWasmCompilationJob::Status
@@ -649,6 +681,24 @@
   }
 
   pipeline_.RunPrintAndVerify("Machine", true);
+  if (FLAG_wasm_opt) {
+    PipelineData* data = &data_;
+    PipelineRunScope scope(data, "WASM optimization");
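+    // Late machine-level cleanup: dead code elimination, value numbering,
+    // machine operator strength reduction and common operator simplification.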
+    JSGraphReducer graph_reducer(data->jsgraph(), scope.zone());
+    DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
+                                              data->common());
+    ValueNumberingReducer value_numbering(scope.zone(), data->graph()->zone());
+    MachineOperatorReducer machine_reducer(data->jsgraph(),
+                                           allow_signalling_nan_);
+    CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
+                                         data->common(), data->machine());
+    AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &value_numbering);
+    AddReducer(data, &graph_reducer, &machine_reducer);
+    AddReducer(data, &graph_reducer, &common_reducer);
+    graph_reducer.ReduceGraph();
+    pipeline_.RunPrintAndVerify("Optimized Machine", true);
+  }
 
   if (!pipeline_.ScheduleAndSelectInstructions(&linkage_, true)) return FAILED;
   return SUCCEEDED;
@@ -694,20 +744,6 @@
 };
 
 
-struct TypeHintAnalysisPhase {
-  static const char* phase_name() { return "type hint analysis"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    if (data->info()->is_type_feedback_enabled()) {
-      TypeHintAnalyzer analyzer(data->graph_zone());
-      Handle<Code> code(data->info()->shared_info()->code(), data->isolate());
-      TypeHintAnalysis* type_hint_analysis = analyzer.Analyze(code);
-      data->set_type_hint_analysis(type_hint_analysis);
-    }
-  }
-};
-
-
 struct GraphBuilderPhase {
   static const char* phase_name() { return "graph builder"; }
 
@@ -715,15 +751,18 @@
     bool succeeded = false;
 
     if (data->info()->is_optimizing_from_bytecode()) {
-      BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
-                                         data->jsgraph(), 1.0f,
-                                         data->source_positions());
+      // The bytecode graph builder assumes deoptimization is enabled.
+      DCHECK(data->info()->is_deoptimization_enabled());
+      BytecodeGraphBuilder graph_builder(
+          temp_zone, data->info()->shared_info(),
+          handle(data->info()->closure()->feedback_vector()),
+          data->info()->osr_ast_id(), data->jsgraph(), 1.0f,
+          data->source_positions());
       succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), 1.0f,
-          data->loop_assignment(), data->type_hint_analysis(),
-          data->source_positions());
+          data->loop_assignment(), data->source_positions());
       succeeded = graph_builder.CreateGraph();
     }
 
@@ -741,17 +780,16 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
+    CheckpointElimination checkpoint_elimination(&graph_reducer);
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
     JSCallReducer::Flags call_reducer_flags = JSCallReducer::kNoFlags;
-    if (data->info()->is_bailout_on_uninitialized()) {
-      call_reducer_flags |= JSCallReducer::kBailoutOnUninitialized;
-    }
     if (data->info()->is_deoptimization_enabled()) {
       call_reducer_flags |= JSCallReducer::kDeoptimizationEnabled;
     }
     JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
-                               call_reducer_flags, data->native_context());
+                               call_reducer_flags, data->native_context(),
+                               data->info()->dependencies());
     JSContextSpecialization context_specialization(
         &graph_reducer, data->jsgraph(),
         data->info()->is_function_context_specializing()
@@ -759,9 +797,6 @@
             : MaybeHandle<Context>());
     JSFrameSpecialization frame_specialization(
         &graph_reducer, data->info()->osr_frame(), data->jsgraph());
-    JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(), data->global_object(),
-        data->info()->dependencies());
     JSNativeContextSpecialization::Flags flags =
         JSNativeContextSpecialization::kNoFlags;
     if (data->info()->is_accessor_inlining_enabled()) {
@@ -787,13 +822,11 @@
             ? JSIntrinsicLowering::kDeoptimizationEnabled
             : JSIntrinsicLowering::kDeoptimizationDisabled);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
+    AddReducer(data, &graph_reducer, &checkpoint_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     if (data->info()->is_frame_specializing()) {
       AddReducer(data, &graph_reducer, &frame_specialization);
     }
-    if (data->info()->is_deoptimization_enabled()) {
-      AddReducer(data, &graph_reducer, &global_object_specialization);
-    }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
@@ -817,21 +850,6 @@
   }
 };
 
-struct OsrTyperPhase {
-  static const char* phase_name() { return "osr typer"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    NodeVector roots(temp_zone);
-    data->jsgraph()->GetCachedNodes(&roots);
-    // Dummy induction variable optimizer: at the moment, we do not try
-    // to compute loop variable bounds on OSR.
-    LoopVariableOptimizer induction_vars(data->jsgraph()->graph(),
-                                         data->common(), temp_zone);
-    Typer typer(data->isolate(), Typer::kNoFlags, data->graph());
-    typer.Run(roots, &induction_vars);
-  }
-};
-
 struct UntyperPhase {
   static const char* phase_name() { return "untyper"; }
 
@@ -888,10 +906,11 @@
             ? JSBuiltinReducer::kDeoptimizationEnabled
             : JSBuiltinReducer::kNoFlags,
         data->info()->dependencies(), data->native_context());
-    Handle<LiteralsArray> literals_array(data->info()->closure()->literals());
+    Handle<FeedbackVector> feedback_vector(
+        data->info()->closure()->feedback_vector());
     JSCreateLowering create_lowering(
         &graph_reducer, data->info()->dependencies(), data->jsgraph(),
-        literals_array, data->native_context(), temp_zone);
+        feedback_vector, data->native_context(), temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -930,7 +949,7 @@
   void Run(PipelineData* data, Zone* temp_zone) {
     EscapeAnalysis escape_analysis(data->graph(), data->jsgraph()->common(),
                                    temp_zone);
-    escape_analysis.Run();
+    if (!escape_analysis.Run()) return;
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
@@ -944,8 +963,8 @@
   }
 };
 
-struct RepresentationSelectionPhase {
-  static const char* phase_name() { return "representation selection"; }
+struct SimplifiedLoweringPhase {
+  static const char* phase_name() { return "simplified lowering"; }
 
   void Run(PipelineData* data, Zone* temp_zone) {
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
@@ -978,6 +997,23 @@
   }
 };
 
+struct ConcurrentOptimizationPrepPhase {
+  static const char* phase_name() {
+    return "concurrent optimization preparation";
+  }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    // Make sure we cache these code stubs.
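+    // Creating them on the main thread here spares the (possibly
+    // concurrent) optimization phase from allocating handles itself.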
+    data->jsgraph()->CEntryStubConstant(1);
+    data->jsgraph()->CEntryStubConstant(2);
+    data->jsgraph()->CEntryStubConstant(3);
+
+    // This is needed for escape analysis.
+    NodeProperties::SetType(data->jsgraph()->FalseConstant(), Type::Boolean());
+    NodeProperties::SetType(data->jsgraph()->TrueConstant(), Type::Boolean());
+  }
+};
+
 struct GenericLoweringPhase {
   static const char* phase_name() { return "generic lowering"; }
 
@@ -1178,21 +1214,6 @@
 };
 
 
-struct StressLoopPeelingPhase {
-  static const char* phase_name() { return "stress loop peeling"; }
-
-  void Run(PipelineData* data, Zone* temp_zone) {
-    // Peel the first outer loop for testing.
-    // TODO(titzer): peel all loops? the N'th loop? Innermost loops?
-    LoopTree* loop_tree = LoopFinder::BuildLoopTree(data->graph(), temp_zone);
-    if (loop_tree != nullptr && loop_tree->outer_loops().size() > 0) {
-      LoopPeeler::Peel(data->graph(), data->common(), loop_tree,
-                       loop_tree->outer_loops()[0], temp_zone);
-    }
-  }
-};
-
-
 struct ComputeSchedulePhase {
   static const char* phase_name() { return "scheduling"; }
 
@@ -1475,8 +1496,6 @@
     Run<LoopAssignmentAnalysisPhase>();
   }
 
-  Run<TypeHintAnalysisPhase>();
-
   Run<GraphBuilderPhase>();
   if (data->compilation_failed()) {
     data->EndPhaseKind();
@@ -1486,8 +1505,6 @@
 
   // Perform OSR deconstruction.
   if (info()->is_osr()) {
-    Run<OsrTyperPhase>();
-
     Run<OsrDeconstructionPhase>();
 
     Run<UntyperPhase>();
@@ -1512,7 +1529,7 @@
     // Determine the Typer operation flags.
     Typer::Flags flags = Typer::kNoFlags;
     if (is_sloppy(info()->shared_info()->language_mode()) &&
-        !info()->shared_info()->IsBuiltin()) {
+        info()->shared_info()->IsUserJavaScript()) {
       // Sloppy mode functions always have an Object for this.
       flags |= Typer::kThisIsReceiver;
     }
@@ -1533,43 +1550,50 @@
     // Lower JSOperators where we can determine types.
     Run<TypedLoweringPhase>();
     RunPrintAndVerify("Lowered typed");
+  }
 
-    if (FLAG_turbo_loop_peeling) {
-      Run<LoopPeelingPhase>();
-      RunPrintAndVerify("Loops peeled", true);
-    } else {
-      Run<LoopExitEliminationPhase>();
-      RunPrintAndVerify("Loop exits eliminated", true);
+  // Do some hacky things to prepare for the optimization phase.
+  // (caching handles, etc.).
+  Run<ConcurrentOptimizationPrepPhase>();
+
+  data->EndPhaseKind();
+
+  return true;
+}
+
+bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
+  if (data->info()->is_loop_peeling_enabled()) {
+    Run<LoopPeelingPhase>();
+    RunPrintAndVerify("Loops peeled", true);
+  } else {
+    Run<LoopExitEliminationPhase>();
+    RunPrintAndVerify("Loop exits eliminated", true);
+  }
+
+  if (!data->is_asm()) {
+    if (FLAG_turbo_load_elimination) {
+      Run<LoadEliminationPhase>();
+      RunPrintAndVerify("Load eliminated");
     }
 
-    if (FLAG_turbo_stress_loop_peeling) {
-      Run<StressLoopPeelingPhase>();
-      RunPrintAndVerify("Loop peeled");
-    }
-
-    if (!info()->shared_info()->asm_function()) {
-      if (FLAG_turbo_load_elimination) {
-        Run<LoadEliminationPhase>();
-        RunPrintAndVerify("Load eliminated");
+    if (FLAG_turbo_escape) {
+      Run<EscapeAnalysisPhase>();
+      if (data->compilation_failed()) {
+        info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
+        data->EndPhaseKind();
+        return false;
       }
-
-      if (FLAG_turbo_escape) {
-        Run<EscapeAnalysisPhase>();
-        if (data->compilation_failed()) {
-          info()->AbortOptimization(kCyclicObjectStateDetectedInEscapeAnalysis);
-          data->EndPhaseKind();
-          return false;
-        }
-        RunPrintAndVerify("Escape Analysed");
-      }
+      RunPrintAndVerify("Escape Analysed");
     }
   }
 
-  // Select representations. This has to run w/o the Typer decorator, because
-  // we cannot compute meaningful types anyways, and the computed types might
-  // even conflict with the representation/truncation logic.
-  Run<RepresentationSelectionPhase>();
-  RunPrintAndVerify("Representations selected", true);
+  // Perform simplified lowering. This has to run w/o the Typer decorator,
+  // because we cannot compute meaningful types anyways, and the computed types
+  // might even conflict with the representation/truncation logic.
+  Run<SimplifiedLoweringPhase>();
+  RunPrintAndVerify("Simplified lowering", true);
 
 #ifdef DEBUG
   // From now on it is invalid to look at types on the nodes, because:
@@ -1592,14 +1616,6 @@
   Run<GenericLoweringPhase>();
   RunPrintAndVerify("Generic lowering", true);
 
-  data->EndPhaseKind();
-
-  return true;
-}
-
-bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
-  PipelineData* data = this->data_;
-
   data->BeginPhaseKind("block building");
 
   // Run early optimization pass.
@@ -1648,7 +1664,9 @@
 
   // Construct a pipeline for scheduling and code generation.
   ZoneStats zone_stats(isolate->allocator());
-  PipelineData data(&zone_stats, &info, graph, schedule);
+  SourcePositionTable source_positions(graph);
+  PipelineData data(&zone_stats, &info, graph, schedule, &source_positions);
+  data.set_verify_graph(FLAG_verify_csa);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(&info, &zone_stats));
@@ -1660,6 +1678,12 @@
 
   if (FLAG_trace_turbo) {
     {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      OFStream os(tracing_scope.file());
+      os << "---------------------------------------------------\n"
+         << "Begin compiling " << debug_name << " using Turbofan" << std::endl;
+    }
+    {
       TurboJsonFile json_of(&info, std::ios_base::trunc);
       json_of << "{\"function\":\"" << info.GetDebugName().get()
               << "\", \"source\":\"\",\n\"phases\":[";
@@ -1696,13 +1720,16 @@
 }
 
 // static
-Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
-                                              CallDescriptor* call_descriptor,
-                                              Graph* graph,
-                                              Schedule* schedule) {
+Handle<Code> Pipeline::GenerateCodeForTesting(
+    CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+    Schedule* schedule, SourcePositionTable* source_positions) {
   // Construct a pipeline for scheduling and code generation.
   ZoneStats zone_stats(info->isolate()->allocator());
-  PipelineData data(&zone_stats, info, graph, schedule);
+  // TODO(wasm): Refactor code generation to check for a non-existent source
+  // position table, then remove this conditional allocation.
+  if (!source_positions)
+    source_positions = new (info->zone()) SourcePositionTable(graph);
+  PipelineData data(&zone_stats, info, graph, schedule, source_positions);
   std::unique_ptr<PipelineStatistics> pipeline_statistics;
   if (FLAG_turbo_stats || FLAG_turbo_stats_nvp) {
     pipeline_statistics.reset(new PipelineStatistics(info, &zone_stats));
@@ -1723,16 +1750,27 @@
 }
 
 // static
-CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function) {
-  return new PipelineCompilationJob(function->GetIsolate(), function);
+CompilationJob* Pipeline::NewCompilationJob(Handle<JSFunction> function,
+                                            bool has_script) {
+  Handle<SharedFunctionInfo> shared = handle(function->shared());
+  ParseInfo* parse_info;
+  if (!has_script) {
+    parse_info = ParseInfo::AllocateWithoutScript(shared);
+  } else {
+    parse_info = new ParseInfo(shared);
+  }
+  return new PipelineCompilationJob(parse_info, function);
 }
 
 // static
 CompilationJob* Pipeline::NewWasmCompilationJob(
-    CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
-    SourcePositionTable* source_positions) {
-  return new PipelineWasmCompilationJob(info, graph, descriptor,
-                                        source_positions);
+    CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+    SourcePositionTable* source_positions,
+    ZoneVector<trap_handler::ProtectedInstructionData>* protected_instructions,
+    bool allow_signalling_nan) {
+  return new PipelineWasmCompilationJob(
+      info, jsgraph, descriptor, source_positions, protected_instructions,
+      allow_signalling_nan);
 }
 
 bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
@@ -1767,12 +1805,27 @@
         info(), data->graph(), data->schedule()));
   }
 
-  if (FLAG_turbo_verify_machine_graph != nullptr &&
-      (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
-       !strcmp(FLAG_turbo_verify_machine_graph,
-               data->info()->GetDebugName().get()))) {
+  bool verify_stub_graph = data->verify_graph();
+  if (verify_stub_graph ||
+      (FLAG_turbo_verify_machine_graph != nullptr &&
+       (!strcmp(FLAG_turbo_verify_machine_graph, "*") ||
+        !strcmp(FLAG_turbo_verify_machine_graph, data->debug_name())))) {
+    if (FLAG_trace_verify_csa) {
+      AllowHandleDereference allow_deref;
+      CompilationInfo* info = data->info();
+      CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+      OFStream os(tracing_scope.file());
+      os << "--------------------------------------------------\n"
+         << "--- Verifying " << data->debug_name() << " generated by TurboFan\n"
+         << "--------------------------------------------------\n"
+         << *data->schedule()
+         << "--------------------------------------------------\n"
+         << "--- End of " << data->debug_name() << " generated by TurboFan\n"
+         << "--------------------------------------------------\n";
+    }
     Zone temp_zone(data->isolate()->allocator(), ZONE_NAME);
     MachineGraphVerifier::Run(data->graph(), data->schedule(), linkage,
+                              data->info()->IsStub(), data->debug_name(),
                               &temp_zone);
   }
 
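Taken together with data.set_verify_graph(FLAG_verify_csa) above, this means machine-graph verification now also covers CSA-built stubs when run with --verify-csa, independently of the per-function --turbo-verify-machine-graph filter, and --trace-verify-csa dumps the schedule to the code tracer before verifying it.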
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
index 0c0a57b..624ef01 100644
--- a/src/compiler/pipeline.h
+++ b/src/compiler/pipeline.h
@@ -9,6 +9,7 @@
 // Do not include anything from src/compiler here!
 #include "src/globals.h"
 #include "src/objects.h"
+#include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
@@ -17,9 +18,14 @@
 class CompilationJob;
 class RegisterConfiguration;
 
+namespace trap_handler {
+struct ProtectedInstructionData;
+}  // namespace trap_handler
+
 namespace compiler {
 
 class CallDescriptor;
+class JSGraph;
 class Graph;
 class InstructionSequence;
 class Schedule;
@@ -28,12 +34,16 @@
 class Pipeline : public AllStatic {
  public:
   // Returns a new compilation job for the given function.
-  static CompilationJob* NewCompilationJob(Handle<JSFunction> function);
+  static CompilationJob* NewCompilationJob(Handle<JSFunction> function,
+                                           bool has_script);
 
   // Returns a new compilation job for the WebAssembly compilation info.
   static CompilationJob* NewWasmCompilationJob(
-      CompilationInfo* info, Graph* graph, CallDescriptor* descriptor,
-      SourcePositionTable* source_positions);
+      CompilationInfo* info, JSGraph* jsgraph, CallDescriptor* descriptor,
+      SourcePositionTable* source_positions,
+      ZoneVector<trap_handler::ProtectedInstructionData>*
+          protected_instructions,
+      bool wasm_origin);
 
   // Run the pipeline on a machine graph and generate code. The {schedule} must
   // be valid, hence the given {graph} does not need to be schedulable.
@@ -60,10 +70,10 @@
 
   // Run the pipeline on a machine graph and generate code. If {schedule} is
   // {nullptr}, then compute a new schedule for code generation.
-  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
-                                             CallDescriptor* call_descriptor,
-                                             Graph* graph,
-                                             Schedule* schedule = nullptr);
+  static Handle<Code> GenerateCodeForTesting(
+      CompilationInfo* info, CallDescriptor* call_descriptor, Graph* graph,
+      Schedule* schedule = nullptr,
+      SourcePositionTable* source_positions = nullptr);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Pipeline);
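The wasm compilation job now also carries the list of protected instructions: memory accesses that the signal-handler-based trap handler is allowed to fault on, together with where execution should continue afterwards. The struct is only forward-declared here; as a rough sketch of the shape such an entry plausibly has (field names are an assumption, not taken from this patch):

  struct ProtectedInstructionData {
    uint32_t instr_offset;    // code offset of the access that may fault
    uint32_t landing_offset;  // code offset to resume at after the trap
  };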
diff --git a/src/compiler/ppc/code-generator-ppc.cc b/src/compiler/ppc/code-generator-ppc.cc
index a838ede..455b0ae 100644
--- a/src/compiler/ppc/code-generator-ppc.cc
+++ b/src/compiler/ppc/code-generator-ppc.cc
@@ -34,6 +34,7 @@
       case kFlags_branch:
       case kFlags_deoptimize:
       case kFlags_set:
+      case kFlags_trap:
         return SetRC;
       case kFlags_none:
         return LeaveRC;
@@ -263,7 +264,8 @@
       // Overflow checked for add/sub only.
       switch (op) {
 #if V8_TARGET_ARCH_PPC64
-        case kPPC_Add:
+        case kPPC_Add32:
+        case kPPC_Add64:
         case kPPC_Sub:
 #endif
         case kPPC_AddWithOverflow32:
@@ -276,7 +278,8 @@
     case kNotOverflow:
       switch (op) {
 #if V8_TARGET_ARCH_PPC64
-        case kPPC_Add:
+        case kPPC_Add32:
+        case kPPC_Add64:
         case kPPC_Sub:
 #endif
         case kPPC_AddWithOverflow32:
@@ -761,36 +764,33 @@
     DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
   } while (0)
 
-#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx)   \
-  do {                                                        \
-    Label done;                                               \
-    Register result = i.OutputRegister();                     \
-    AddressingMode mode = kMode_None;                         \
-    MemOperand operand = i.MemoryOperand(&mode);              \
-    __ sync();                                                \
-    if (mode == kMode_MRI) {                                  \
-    __ asm_instr(result, operand);                            \
-    } else {                                                  \
-    __ asm_instrx(result, operand);                           \
-    }                                                         \
-    __ bind(&done);                                           \
-    __ cmp(result, result);                                   \
-    __ bne(&done);                                            \
-    __ isync();                                               \
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
+  do {                                                      \
+    Label done;                                             \
+    Register result = i.OutputRegister();                   \
+    AddressingMode mode = kMode_None;                       \
+    MemOperand operand = i.MemoryOperand(&mode);            \
+    if (mode == kMode_MRI) {                                \
+      __ asm_instr(result, operand);                        \
+    } else {                                                \
+      __ asm_instrx(result, operand);                       \
+    }                                                       \
+    __ lwsync();                                            \
   } while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx)  \
-  do {                                                        \
-    size_t index = 0;                                         \
-    AddressingMode mode = kMode_None;                         \
-    MemOperand operand = i.MemoryOperand(&mode, &index);      \
-    Register value = i.InputRegister(index);                  \
-    __ sync();                                                \
-    if (mode == kMode_MRI) {                                  \
-      __ asm_instr(value, operand);                           \
-    } else {                                                  \
-      __ asm_instrx(value, operand);                          \
-    }                                                         \
-    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
+  do {                                                       \
+    size_t index = 0;                                        \
+    AddressingMode mode = kMode_None;                        \
+    MemOperand operand = i.MemoryOperand(&mode, &index);     \
+    Register value = i.InputRegister(index);                 \
+    __ lwsync();                                             \
+    if (mode == kMode_MRI) {                                 \
+      __ asm_instr(value, operand);                          \
+    } else {                                                 \
+      __ asm_instrx(value, operand);                         \
+    }                                                        \
+    __ sync();                                               \
+    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
   } while (0)
 
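The rewritten atomics drop the heavyweight pre-access sync plus the compare/branch/isync acquire trick in favor of lwsync: loads are followed by an lwsync, stores are preceded by an lwsync and followed by a full sync. As a rough correspondence, these are one conventional PPC lowering of the C++11 atomics below (a hedged analogy, not a claim about the exact mapping V8 targets):

  #include <atomic>

  std::atomic<int> cell{0};

  int AtomicLoad() {
    // Roughly: lwz/ld; lwsync  (the new ASSEMBLE_ATOMIC_LOAD_INTEGER shape).
    return cell.load(std::memory_order_acquire);
  }

  void AtomicStore(int v) {
    // Roughly: lwsync; stw/std; sync  (the new ASSEMBLE_ATOMIC_STORE_INTEGER shape).
    cell.store(v, std::memory_order_seq_cst);
  }

  int main() { AtomicStore(1); return AtomicLoad(); }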
 void CodeGenerator::AssembleDeconstructFrame() {
@@ -813,7 +813,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ cmpi(scratch1,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -1082,10 +1083,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1322,7 +1321,7 @@
                 63 - i.InputInt32(2), i.OutputRCBit());
       break;
 #endif
-    case kPPC_Add:
+    case kPPC_Add32:
 #if V8_TARGET_ARCH_PPC64
       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
         ASSEMBLE_ADD_WITH_OVERFLOW();
@@ -1335,10 +1334,26 @@
           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
           DCHECK_EQ(LeaveRC, i.OutputRCBit());
         }
+        __ extsw(i.OutputRegister(), i.OutputRegister());
 #if V8_TARGET_ARCH_PPC64
       }
 #endif
       break;
+#if V8_TARGET_ARCH_PPC64
+    case kPPC_Add64:
+      if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+        ASSEMBLE_ADD_WITH_OVERFLOW();
+      } else {
+        if (HasRegisterInput(instr, 1)) {
+          __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+                 LeaveOE, i.OutputRCBit());
+        } else {
+          __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
+          DCHECK_EQ(LeaveRC, i.OutputRCBit());
+        }
+      }
+      break;
+#endif
     case kPPC_AddWithOverflow32:
       ASSEMBLE_ADD_WITH_OVERFLOW32();
       break;
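Splitting kPPC_Add into kPPC_Add32 and kPPC_Add64 lets the 32-bit flavor sign-extend its result with extsw on PPC64, so a 32-bit add always leaves a canonical int32 in the 64-bit register. A tiny host-side illustration of what add-then-extsw computes:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uint64_t a = 0x7fffffff, b = 1;
    // A raw 64-bit add would yield 0x0000000080000000; sign-extending the
    // low 32 bits (what extsw does) yields the proper int32 result:
    int64_t canonical = static_cast<int32_t>(static_cast<uint32_t>(a + b));
    std::printf("%lld\n", static_cast<long long>(canonical));  // -2147483648
  }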
@@ -1431,19 +1446,35 @@
       ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
       break;
     case kPPC_Mod32:
-      ASSEMBLE_MODULO(divw, mullw);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divw, mullw);
+      }
       break;
 #if V8_TARGET_ARCH_PPC64
     case kPPC_Mod64:
-      ASSEMBLE_MODULO(divd, mulld);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divd, mulld);
+      }
       break;
 #endif
     case kPPC_ModU32:
-      ASSEMBLE_MODULO(divwu, mullw);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divwu, mullw);
+      }
       break;
 #if V8_TARGET_ARCH_PPC64
     case kPPC_ModU64:
-      ASSEMBLE_MODULO(divdu, mulld);
+      if (CpuFeatures::IsSupported(MODULO)) {
+        __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        ASSEMBLE_MODULO(divdu, mulld);
+      }
       break;
 #endif
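On CPUs with the ISA 3.0 modulo instructions (the MODULO feature, i.e. POWER9 and later), mod is now a single modsw/modsd/moduw/modud; otherwise the old ASSEMBLE_MODULO path remains, which presumably reconstructs the remainder from a divide and a multiply. A sketch of that fallback in plain C++ (ignoring the INT32_MIN / -1 edge case):

  #include <cstdint>

  int32_t Mod32Fallback(int32_t a, int32_t b) {
    int32_t q = a / b;   // divw: truncated quotient
    return a - q * b;    // mullw plus a subtract yields the remainder
  }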
     case kPPC_ModDouble:
@@ -1984,6 +2015,84 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      PPCOperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED, true);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+
+  ArchOpcode op = instr->arch_opcode();
+  CRegister cr = cr0;
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kPPC_CmpDouble) {
+    // check for unordered if necessary
+    if (cond == le) {
+      __ bunordered(&end, cr);
+      // Unnecessary for eq/lt since only FU bit will be set.
+    } else if (cond == gt) {
+      __ bunordered(tlabel, cr);
+      // Unnecessary for ne/ge since only FU bit will be set.
+    }
+  }
+  __ b(cond, tlabel, cr);
+  __ bind(&end);
+}
 
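The trap check itself stays on the fast path as a single conditional branch; everything else lives in the out-of-line stub. OutOfLineTrap::Generate() builds a WASM_COMPILED frame when the surrounding code elided its frame, then either calls the trap builtin and records a safepoint, or, for the cctest sentinel trap_id == Builtins::builtin_count, calls the C trap callback directly and returns. The bunordered handling routes the unordered result of a floating-point compare explicitly, since only the FU bit is set in that case.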
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2072,16 +2181,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2257,11 +2369,9 @@
       switch (src.type()) {
         case Constant::kInt32:
 #if V8_TARGET_ARCH_PPC64
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
 #else
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
@@ -2270,11 +2380,10 @@
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_PPC64
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
 #endif
             __ mov(dst, Operand(src.ToInt64()));
 #if V8_TARGET_ARCH_PPC64
@@ -2313,8 +2422,23 @@
       DoubleRegister dst = destination->IsFPRegister()
                                ? g.ToDoubleRegister(destination)
                                : kScratchDoubleReg;
-      double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
-                                                        : src.ToFloat64();
+      double value;
+// bit_cast of snan is converted to qnan on ia32/x64
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      intptr_t valueInt = (src.type() == Constant::kFloat32)
+                              ? src.ToFloat32AsInt()
+                              : src.ToFloat64AsInt();
+      if (valueInt == ((src.type() == Constant::kFloat32)
+                           ? 0x7fa00000
+                           : 0x7fa0000000000000)) {
+        value = bit_cast<double, int64_t>(0x7ff4000000000000L);
+      } else {
+#endif
+        value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
+                                                   : src.ToFloat64();
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+      }
+#endif
       __ LoadDoubleLiteral(dst, value, kScratchReg);
       if (destination->IsFPStackSlot()) {
         __ StoreDouble(dst, g.ToMemOperand(destination), r0);
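The special-casing exists because, as the comment says, pushing a signaling NaN through the host's floating-point pipeline on ia32/x64 silently quiets it. The float32 sentinel 0x7fa00000 has an all-ones exponent and a clear quiet bit; widened bit-exactly it is precisely the 0x7ff4000000000000 double the code materializes via bit_cast. A small standalone demonstration of the hazard being avoided:

  #include <cinttypes>
  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint32_t f_bits = 0x7fa00000;  // float32 sNaN: exponent 0xFF, quiet bit (bit 22) clear
    float f;
    std::memcpy(&f, &f_bits, sizeof f);

    double d = static_cast<double>(f);  // the conversion the generator avoids
    uint64_t d_bits;
    std::memcpy(&d_bits, &d, sizeof d_bits);

    // Bit-exact widening would give 7ff4000000000000 (still signaling); on an
    // ia32/x64 host this typically prints 7ffc000000000000 instead, because
    // the conversion sets the quiet bit.
    std::printf("%016" PRIx64 "\n", d_bits);
  }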
diff --git a/src/compiler/ppc/instruction-codes-ppc.h b/src/compiler/ppc/instruction-codes-ppc.h
index 9198bcb..f68ab3a 100644
--- a/src/compiler/ppc/instruction-codes-ppc.h
+++ b/src/compiler/ppc/instruction-codes-ppc.h
@@ -33,7 +33,8 @@
   V(PPC_RotLeftAndClear64)         \
   V(PPC_RotLeftAndClearLeft64)     \
   V(PPC_RotLeftAndClearRight64)    \
-  V(PPC_Add)                       \
+  V(PPC_Add32)                     \
+  V(PPC_Add64)                     \
   V(PPC_AddWithOverflow32)         \
   V(PPC_AddPair)                   \
   V(PPC_AddDouble)                 \
@@ -42,7 +43,7 @@
   V(PPC_SubPair)                   \
   V(PPC_SubDouble)                 \
   V(PPC_Mul32)                     \
-  V(PPC_Mul32WithHigh32)          \
+  V(PPC_Mul32WithHigh32)           \
   V(PPC_Mul64)                     \
   V(PPC_MulHigh32)                 \
   V(PPC_MulHighU32)                \
diff --git a/src/compiler/ppc/instruction-scheduler-ppc.cc b/src/compiler/ppc/instruction-scheduler-ppc.cc
index dee8494..640a7e4 100644
--- a/src/compiler/ppc/instruction-scheduler-ppc.cc
+++ b/src/compiler/ppc/instruction-scheduler-ppc.cc
@@ -35,7 +35,8 @@
     case kPPC_RotLeftAndClear64:
     case kPPC_RotLeftAndClearLeft64:
     case kPPC_RotLeftAndClearRight64:
-    case kPPC_Add:
+    case kPPC_Add32:
+    case kPPC_Add64:
     case kPPC_AddWithOverflow32:
     case kPPC_AddPair:
     case kPPC_AddDouble:
diff --git a/src/compiler/ppc/instruction-selector-ppc.cc b/src/compiler/ppc/instruction-selector-ppc.cc
index 768b188..c2770b3 100644
--- a/src/compiler/ppc/instruction-selector-ppc.cc
+++ b/src/compiler/ppc/instruction-selector-ppc.cc
@@ -154,7 +154,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -213,6 +216,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -322,6 +328,9 @@
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -339,6 +348,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -381,6 +395,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -429,6 +446,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -836,7 +856,7 @@
 }
 
 void InstructionSelector::VisitInt32PairAdd(Node* node) {
-  VisitPairBinop(this, kPPC_AddPair, kPPC_Add, node);
+  VisitPairBinop(this, kPPC_AddPair, kPPC_Add32, node);
 }
 
 void InstructionSelector::VisitInt32PairSub(Node* node) {
@@ -1013,13 +1033,13 @@
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kPPC_Add32, kInt16Imm);
 }
 
 
 #if V8_TARGET_ARCH_PPC64
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm);
 }
 #endif
 
@@ -1481,11 +1501,11 @@
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm,
+    return VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm,
                                          &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add, kInt16Imm, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kPPC_Add64, kInt16Imm, &cont);
 }
 
 
@@ -1528,11 +1548,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1693,7 +1716,7 @@
 #if V8_TARGET_ARCH_PPC64
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add,
+                return VisitBinop<Int64BinopMatcher>(selector, node, kPPC_Add64,
                                                      kInt16Imm, cont);
               case IrOpcode::kInt64SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
@@ -1771,14 +1794,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
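Trap continuations reuse the existing compare emission: the selector simply appends g.UseImmediate(cont->trap_id()) as one extra input after the operands, and the code generator's OutOfLineTrap reads it back as the instruction's last input. The branch, deoptimize, and set encodings are untouched.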
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index 14695c1..0e10177 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -4,10 +4,10 @@
 
 #include "src/compiler/raw-machine-assembler.h"
 
-#include "src/code-factory.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/scheduler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -51,12 +51,12 @@
     os << *schedule_;
   }
   schedule_->EnsureCFGWellFormedness();
+  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   schedule_->PropagateDeferredMark();
   if (FLAG_trace_turbo_scheduler) {
     PrintF("--- EDGE SPLIT AND PROPAGATED DEFERRED SCHEDULE ------------\n");
     os << *schedule_;
   }
-  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate RawMachineAssembler.
   Schedule* schedule = schedule_;
   schedule_ = nullptr;
@@ -166,299 +166,39 @@
 
 void RawMachineAssembler::DebugBreak() { AddNode(machine()->DebugBreak()); }
 
+void RawMachineAssembler::Unreachable() {
+  Node* values[] = {UndefinedConstant()};  // Unused.
+  Node* ret = MakeNode(common()->Throw(), 1, values);
+  schedule()->AddThrow(CurrentBlock(), ret);
+  current_block_ = nullptr;
+}
+
 void RawMachineAssembler::Comment(const char* msg) {
   AddNode(machine()->Comment(msg));
 }
 
-Node* RawMachineAssembler::CallN(CallDescriptor* desc, Node* function,
-                                 Node** args) {
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 1;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  return AddNode(common()->Call(desc), input_count, buffer);
+Node* RawMachineAssembler::CallN(CallDescriptor* desc, int input_count,
+                                 Node* const* inputs) {
+  DCHECK(!desc->NeedsFrameState());
+  // +1 is for target.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+  return AddNode(common()->Call(desc), input_count, inputs);
 }
 
-
 Node* RawMachineAssembler::CallNWithFrameState(CallDescriptor* desc,
-                                               Node* function, Node** args,
-                                               Node* frame_state) {
+                                               int input_count,
+                                               Node* const* inputs) {
   DCHECK(desc->NeedsFrameState());
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 2;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  buffer[index++] = frame_state;
-  return AddNode(common()->Call(desc), input_count, buffer);
+  // +2 is for target and frame state.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 2);
+  return AddNode(common()->Call(desc), input_count, inputs);
 }
 
-Node* RawMachineAssembler::CallRuntime0(Runtime::FunctionId function,
-                                        Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 0, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(0);
-
-  return AddNode(common()->Call(descriptor), centry, ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
-                                        Node* arg1, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 1, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(1);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, ref, arity, context);
-}
-
-
-Node* RawMachineAssembler::CallRuntime2(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 2, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(2);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, ref, arity,
-                 context);
-}
-
-Node* RawMachineAssembler::CallRuntime3(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 3, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(3);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, ref,
-                 arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime4(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 4, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(4);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
-                 ref, arity, context);
-}
-
-Node* RawMachineAssembler::CallRuntime5(Runtime::FunctionId function,
-                                        Node* arg1, Node* arg2, Node* arg3,
-                                        Node* arg4, Node* arg5, Node* context) {
-  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, 5, Operator::kNoProperties, CallDescriptor::kNoFlags);
-  int return_count = static_cast<int>(descriptor->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(5);
-
-  return AddNode(common()->Call(descriptor), centry, arg1, arg2, arg3, arg4,
-                 arg5, ref, arity, context);
-}
-
-Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, Node* function,
-                                     Node** args) {
-  int param_count = static_cast<int>(desc->ParameterCount());
-  int input_count = param_count + 1;
-  Node** buffer = zone()->NewArray<Node*>(input_count);
-  int index = 0;
-  buffer[index++] = function;
-  for (int i = 0; i < param_count; i++) {
-    buffer[index++] = args[i];
-  }
-  Node* tail_call = MakeNode(common()->TailCall(desc), input_count, buffer);
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime0(Runtime::FunctionId function,
-                                            Node* context) {
-  const int kArity = 0;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime1(Runtime::FunctionId function,
-                                            Node* arg1, Node* context) {
-  const int kArity = 1;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-
-Node* RawMachineAssembler::TailCallRuntime2(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2,
-                                            Node* context) {
-  const int kArity = 2;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime3(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* context) {
-  const int kArity = 3;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime4(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* context) {
-  const int kArity = 4;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime5(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* arg5,
-                                            Node* context) {
-  const int kArity = 5;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3, arg4, arg5, ref, arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
-  schedule()->AddTailCall(CurrentBlock(), tail_call);
-  current_block_ = nullptr;
-  return tail_call;
-}
-
-Node* RawMachineAssembler::TailCallRuntime6(Runtime::FunctionId function,
-                                            Node* arg1, Node* arg2, Node* arg3,
-                                            Node* arg4, Node* arg5, Node* arg6,
-                                            Node* context) {
-  const int kArity = 6;
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      zone(), function, kArity, Operator::kNoProperties,
-      CallDescriptor::kSupportsTailCalls);
-  int return_count = static_cast<int>(desc->ReturnCount());
-
-  Node* centry = HeapConstant(CEntryStub(isolate(), return_count).GetCode());
-  Node* ref = AddNode(
-      common()->ExternalConstant(ExternalReference(function, isolate())));
-  Node* arity = Int32Constant(kArity);
-
-  Node* nodes[] = {centry, arg1, arg2, arg3,  arg4,
-                   arg5,   arg6, ref,  arity, context};
-  Node* tail_call = MakeNode(common()->TailCall(desc), arraysize(nodes), nodes);
-
+Node* RawMachineAssembler::TailCallN(CallDescriptor* desc, int input_count,
+                                     Node* const* inputs) {
+  // +1 is for target.
+  DCHECK_EQ(input_count, desc->ParameterCount() + 1);
+  Node* tail_call = MakeNode(common()->TailCall(desc), input_count, inputs);
   schedule()->AddTailCall(CurrentBlock(), tail_call);
   current_block_ = nullptr;
   return tail_call;
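The N-ary call helpers now take one flat inputs array instead of a target plus separate args: the call target goes first, and for CallNWithFrameState the frame state goes last, exactly as the DCHECKs above spell out. A hedged usage sketch (m, desc, target, argN, and frame_state are assumed to be in scope):

  // ParameterCount() == 2 here, so input_count is 2 + 1 (target first).
  Node* call_inputs[] = {target, arg0, arg1};
  Node* result = m.CallN(desc, 3, call_inputs);

  // With a frame state: 2 + 2 inputs, frame state last.
  Node* inputs_with_fs[] = {target, arg0, arg1, frame_state};
  Node* result2 = m.CallNWithFrameState(desc, 4, inputs_with_fs);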
@@ -502,6 +242,21 @@
   return AddNode(common()->Call(descriptor), function, arg0, arg1);
 }
 
+Node* RawMachineAssembler::CallCFunction3(MachineType return_type,
+                                          MachineType arg0_type,
+                                          MachineType arg1_type,
+                                          MachineType arg2_type, Node* function,
+                                          Node* arg0, Node* arg1, Node* arg2) {
+  MachineSignature::Builder builder(zone(), 1, 3);
+  builder.AddReturn(return_type);
+  builder.AddParam(arg0_type);
+  builder.AddParam(arg1_type);
+  builder.AddParam(arg2_type);
+  const CallDescriptor* descriptor =
+      Linkage::GetSimplifiedCDescriptor(zone(), builder.Build());
+
+  return AddNode(common()->Call(descriptor), function, arg0, arg1, arg2);
+}
 
 Node* RawMachineAssembler::CallCFunction8(
     MachineType return_type, MachineType arg0_type, MachineType arg1_type,
@@ -584,7 +339,11 @@
   return graph()->NewNodeUnchecked(op, input_count, inputs);
 }
 
-RawMachineLabel::~RawMachineLabel() { DCHECK(bound_ || !used_); }
+RawMachineLabel::~RawMachineLabel() {
+  // If this DCHECK fails, the label was bound but never used, or used but
+  // never bound. Either mistake would cause the register allocator to crash.
+  DCHECK_EQ(bound_, used_);
+}
 
 }  // namespace compiler
 }  // namespace internal
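The destructor check is now symmetric: a label must be either bound and used, or neither. A bound-but-unused label used to slip through the old DCHECK(bound_ || !used_). In RawMachineAssembler terms the contract looks like this (sketch; m is an assembler, and Goto is assumed to mark the label as used):

  RawMachineLabel label;
  m.Goto(&label);   // marks the label as used...
  m.Bind(&label);   // ...so it must also be bound before it is destroyed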
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 6d2accb..d726217 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -534,13 +534,21 @@
 
   // Conversions.
   Node* BitcastTaggedToWord(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
     return AddNode(machine()->BitcastTaggedToWord(), a);
+#else
+    return a;
+#endif
   }
   Node* BitcastWordToTagged(Node* a) {
     return AddNode(machine()->BitcastWordToTagged(), a);
   }
   Node* BitcastWordToTaggedSigned(Node* a) {
+#ifdef ENABLE_VERIFY_CSA
     return AddNode(machine()->BitcastWordToTaggedSigned(), a);
+#else
+    return a;
+#endif
   }
   Node* TruncateFloat64ToWord32(Node* a) {
     return AddNode(machine()->TruncateFloat64ToWord32(), a);
@@ -653,6 +661,12 @@
   Node* Float64RoundTiesEven(Node* a) {
     return AddNode(machine()->Float64RoundTiesEven().op(), a);
   }
+  Node* Word32ReverseBytes(Node* a) {
+    return AddNode(machine()->Word32ReverseBytes().op(), a);
+  }
+  Node* Word64ReverseBytes(Node* a) {
+    return AddNode(machine()->Word64ReverseBytes().op(), a);
+  }
 
   // Float64 bit operations.
   Node* Float64ExtractLowWord32(Node* a) {
@@ -701,26 +715,18 @@
   }
 
   // Call a given call descriptor and the given arguments.
-  Node* CallN(CallDescriptor* desc, Node* function, Node** args);
+  // The call target is passed as part of the {inputs} array.
+  Node* CallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
   // Call a given call descriptor and the given arguments and frame-state.
-  Node* CallNWithFrameState(CallDescriptor* desc, Node* function, Node** args,
-                            Node* frame_state);
-  // Call to a runtime function with zero arguments.
-  Node* CallRuntime0(Runtime::FunctionId function, Node* context);
-  // Call to a runtime function with one arguments.
-  Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context);
-  // Call to a runtime function with two arguments.
-  Node* CallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* context);
-  // Call to a runtime function with three arguments.
-  Node* CallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* context);
-  // Call to a runtime function with four arguments.
-  Node* CallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* arg4, Node* context);
-  // Call to a runtime function with five arguments.
-  Node* CallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                     Node* arg3, Node* arg4, Node* arg5, Node* context);
+  // The call target and frame state are passed as part of the {inputs} array.
+  Node* CallNWithFrameState(CallDescriptor* desc, int input_count,
+                            Node* const* inputs);
+
+  // Tail call a given call descriptor and the given arguments.
+  // The call target is passed as part of the {inputs} array.
+  Node* TailCallN(CallDescriptor* desc, int input_count, Node* const* inputs);
+
   // Call to a C function with zero arguments.
   Node* CallCFunction0(MachineType return_type, Node* function);
   // Call to a C function with one parameter.
@@ -730,6 +736,10 @@
   Node* CallCFunction2(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, Node* function, Node* arg0,
                        Node* arg1);
+  // Call to a C function with three arguments.
+  Node* CallCFunction3(MachineType return_type, MachineType arg0_type,
+                       MachineType arg1_type, MachineType arg2_type,
+                       Node* function, Node* arg0, Node* arg1, Node* arg2);
   // Call to a C function with eight arguments.
   Node* CallCFunction8(MachineType return_type, MachineType arg0_type,
                        MachineType arg1_type, MachineType arg2_type,
@@ -739,30 +749,6 @@
                        Node* arg1, Node* arg2, Node* arg3, Node* arg4,
                        Node* arg5, Node* arg6, Node* arg7);
 
-  // Tail call the given call descriptor and the given arguments.
-  Node* TailCallN(CallDescriptor* call_descriptor, Node* function, Node** args);
-  // Tail call to a runtime function with zero arguments.
-  Node* TailCallRuntime0(Runtime::FunctionId function, Node* context);
-  // Tail call to a runtime function with one argument.
-  Node* TailCallRuntime1(Runtime::FunctionId function, Node* arg0,
-                         Node* context);
-  // Tail call to a runtime function with two arguments.
-  Node* TailCallRuntime2(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* context);
-  // Tail call to a runtime function with three arguments.
-  Node* TailCallRuntime3(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* context);
-  // Tail call to a runtime function with four arguments.
-  Node* TailCallRuntime4(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* context);
-  // Tail call to a runtime function with five arguments.
-  Node* TailCallRuntime5(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* arg5, Node* context);
-  // Tail call to a runtime function with six arguments.
-  Node* TailCallRuntime6(Runtime::FunctionId function, Node* arg1, Node* arg2,
-                         Node* arg3, Node* arg4, Node* arg5, Node* arg6,
-                         Node* context);
-
   // ===========================================================================
   // The following utility methods deal with control flow, hence might switch
   // the current basic block or create new basic blocks for labels.
@@ -783,6 +769,7 @@
   void Bind(RawMachineLabel* label);
   void Deoptimize(Node* state);
   void DebugBreak();
+  void Unreachable();
   void Comment(const char* msg);
 
   // Add success / exception successor blocks and ends the current block ending
diff --git a/src/compiler/redundancy-elimination.cc b/src/compiler/redundancy-elimination.cc
index 6dcf2bf..38feb8b 100644
--- a/src/compiler/redundancy-elimination.cc
+++ b/src/compiler/redundancy-elimination.cc
@@ -16,12 +16,15 @@
 RedundancyElimination::~RedundancyElimination() {}
 
 Reduction RedundancyElimination::Reduce(Node* node) {
+  if (node_checks_.Get(node)) return NoChange();
   switch (node->opcode()) {
     case IrOpcode::kCheckBounds:
     case IrOpcode::kCheckFloat64Hole:
     case IrOpcode::kCheckHeapObject:
     case IrOpcode::kCheckIf:
+    case IrOpcode::kCheckInternalizedString:
     case IrOpcode::kCheckNumber:
+    case IrOpcode::kCheckReceiver:
     case IrOpcode::kCheckSmi:
     case IrOpcode::kCheckString:
     case IrOpcode::kCheckTaggedHole:
@@ -36,6 +39,11 @@
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedUint32ToInt32:
       return ReduceCheckNode(node);
+    case IrOpcode::kSpeculativeNumberAdd:
+    case IrOpcode::kSpeculativeNumberSubtract:
+      // For increments and decrements by a constant, try to learn from the last
+      // bounds check.
+      return TryReuseBoundsCheckForFirstInput(node);
     case IrOpcode::kEffectPhi:
       return ReduceEffectPhi(node);
     case IrOpcode::kDead:
@@ -114,7 +122,14 @@
 namespace {
 
 bool IsCompatibleCheck(Node const* a, Node const* b) {
-  if (a->op() != b->op()) return false;
+  if (a->op() != b->op()) {
+    if (a->opcode() == IrOpcode::kCheckInternalizedString &&
+        b->opcode() == IrOpcode::kCheckString) {
+      // CheckInternalizedString(node) implies CheckString(node)
+    } else {
+      return false;
+    }
+  }
   for (int i = a->op()->ValueInputCount(); --i >= 0;) {
     if (a->InputAt(i) != b->InputAt(i)) return false;
   }
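
The relaxed comparison above lets a stronger check stand in for a weaker one: a value that already passed CheckInternalizedString cannot fail CheckString. A minimal standalone sketch of this subsumption rule, using a hypothetical CheckKind enum in place of V8's IrOpcode:

#include <cassert>

enum class CheckKind { kCheckString, kCheckInternalizedString };

// Returns true when a previously performed check `a` makes a later check `b`
// redundant: exact equality always subsumes, plus the single one-way
// implication internalized-string => string.
bool Subsumes(CheckKind a, CheckKind b) {
  if (a == b) return true;
  return a == CheckKind::kCheckInternalizedString &&
         b == CheckKind::kCheckString;
}

int main() {
  assert(Subsumes(CheckKind::kCheckInternalizedString,
                  CheckKind::kCheckString));
  assert(!Subsumes(CheckKind::kCheckString,
                   CheckKind::kCheckInternalizedString));
}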
@@ -133,6 +148,17 @@
   return nullptr;
 }
 
+Node* RedundancyElimination::EffectPathChecks::LookupBoundsCheckFor(
+    Node* node) const {
+  for (Check const* check = head_; check != nullptr; check = check->next) {
+    if (check->node->opcode() == IrOpcode::kCheckBounds &&
+        check->node->InputAt(0) == node) {
+      return check->node;
+    }
+  }
+  return nullptr;
+}
+
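
LookupBoundsCheckFor is a plain linear scan over the singly linked list of checks known to hold on the current effect path, returning the first CheckBounds whose checked value is the node of interest. The same walk, sketched self-contained with hypothetical stand-in types:

struct Node;  // opaque stand-in for compiler::Node

struct Check {
  bool is_bounds_check;  // stand-in for opcode() == IrOpcode::kCheckBounds
  Node* checked_value;   // stand-in for node->InputAt(0)
  Node* check_node;      // the CheckBounds node itself
  Check* next;
};

// First known bounds check guarding `value` on this effect path, if any.
Node* LookupBoundsCheckFor(const Check* head, Node* value) {
  for (const Check* c = head; c != nullptr; c = c->next) {
    if (c->is_bounds_check && c->checked_value == value) return c->check_node;
  }
  return nullptr;
}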
 RedundancyElimination::EffectPathChecks const*
 RedundancyElimination::PathChecksForEffectNodes::Get(Node* node) const {
   size_t const id = node->id();
@@ -158,10 +184,41 @@
     ReplaceWithValue(node, check);
     return Replace(check);
   }
+
   // Learn from this check.
   return UpdateChecks(node, checks->AddCheck(zone(), node));
 }
 
+Reduction RedundancyElimination::TryReuseBoundsCheckForFirstInput(Node* node) {
+  DCHECK(node->opcode() == IrOpcode::kSpeculativeNumberAdd ||
+         node->opcode() == IrOpcode::kSpeculativeNumberSubtract);
+
+  DCHECK_EQ(1, node->op()->EffectInputCount());
+  DCHECK_EQ(1, node->op()->EffectOutputCount());
+
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  EffectPathChecks const* checks = node_checks_.Get(effect);
+
+  // If we do not know anything about the predecessor, do not propagate just yet
+  // because we will have to recompute anyway once we compute the predecessor.
+  if (checks == nullptr) return NoChange();
+
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  // Only use bounds checks for increments/decrements by a constant.
+  if (right->opcode() == IrOpcode::kNumberConstant) {
+    if (Node* bounds_check = checks->LookupBoundsCheckFor(left)) {
+      // Only use the bounds-checked type if it is more precise.
+      if (NodeProperties::GetType(bounds_check)
+              ->Is(NodeProperties::GetType(left))) {
+        node->ReplaceInput(0, bounds_check);
+      }
+    }
+  }
+
+  return UpdateChecks(node, checks);
+}
+
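
This reduction targets the common loop shape where a[i] bounds-checks i and an i + 1 (or i - 1) follows: the speculative add can consume the CheckBounds node itself, whose type is usually a narrower integer range than the raw i. The replacement is only made when it refines the type, a condition sketched below with a hypothetical Range standing in for V8's Type lattice:

struct Range {  // hypothetical stand-in for compiler::Type
  double min, max;
  // Subtype test: this range lies entirely within `other`.
  bool Is(const Range& other) const {
    return min >= other.min && max <= other.max;
  }
};

// Mirrors the guard above: swap in the bounds-checked value only when its
// type is at least as precise, so no type information is lost.
bool ShouldUseBoundsCheck(const Range& checked, const Range& original) {
  return checked.Is(original);
}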
 Reduction RedundancyElimination::ReduceEffectPhi(Node* node) {
   Node* const control = NodeProperties::GetControlInput(node);
   if (control->opcode() == IrOpcode::kLoop) {
diff --git a/src/compiler/redundancy-elimination.h b/src/compiler/redundancy-elimination.h
index 88f9032..786c960 100644
--- a/src/compiler/redundancy-elimination.h
+++ b/src/compiler/redundancy-elimination.h
@@ -34,6 +34,7 @@
 
     EffectPathChecks const* AddCheck(Zone* zone, Node* node) const;
     Node* LookupCheck(Node* node) const;
+    Node* LookupBoundsCheckFor(Node* node) const;
 
    private:
     EffectPathChecks(Check* head, size_t size) : head_(head), size_(size) {}
@@ -62,6 +63,8 @@
   Reduction TakeChecksFromFirstEffect(Node* node);
   Reduction UpdateChecks(Node* node, EffectPathChecks const* checks);
 
+  Reduction TryReuseBoundsCheckForFirstInput(Node* node);
+
   Zone* zone() const { return zone_; }
 
   PathChecksForEffectNodes node_checks_;
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
index cefd04a..d589a9d 100644
--- a/src/compiler/register-allocator-verifier.cc
+++ b/src/compiler/register-allocator-verifier.cc
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/register-allocator-verifier.h"
+
 #include "src/bit-vector.h"
 #include "src/compiler/instruction.h"
-#include "src/compiler/register-allocator-verifier.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -300,6 +302,27 @@
   }
 }
 
+void BlockAssessments::Print() const {
+  OFStream os(stdout);
+  for (const auto pair : map()) {
+    const InstructionOperand op = pair.first;
+    const Assessment* assessment = pair.second;
+    // Use operator<< so we can write the assessment on the same
+    // line. Since we need a register configuration, just pick
+    // Turbofan for now.
+    PrintableInstructionOperand wrapper = {RegisterConfiguration::Turbofan(),
+                                           op};
+    os << wrapper << " : ";
+    if (assessment->kind() == AssessmentKind::Final) {
+      os << "v" << FinalAssessment::cast(assessment)->virtual_register();
+    } else {
+      os << "P";
+    }
+    os << std::endl;
+  }
+  os << std::endl;
+}
+
 BlockAssessments* RegisterAllocatorVerifier::CreateForBlock(
     const InstructionBlock* block) {
   RpoNumber current_block_id = block->rpo_number();
@@ -352,8 +375,9 @@
   // for the original operand (the one the assessment was first created for)
   // are also pending. To avoid recursion, we use a work list. To
   // deal with cycles, we keep a set of seen nodes.
-  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(zone());
-  ZoneSet<RpoNumber> seen(zone());
+  Zone local_zone(zone()->allocator(), ZONE_NAME);
+  ZoneQueue<std::pair<const PendingAssessment*, int>> worklist(&local_zone);
+  ZoneSet<RpoNumber> seen(&local_zone);
   worklist.push(std::make_pair(assessment, virtual_register));
   seen.insert(block_id);
 
@@ -448,7 +472,11 @@
   // is virtual_register.
   const PendingAssessment* old = assessment->original_pending_assessment();
   CHECK_NOT_NULL(old);
-  ValidatePendingAssessment(block_id, op, current_assessments, old,
+  RpoNumber old_block = old->origin()->rpo_number();
+  DCHECK_LE(old_block, block_id);
+  BlockAssessments* old_block_assessments =
+      old_block == block_id ? current_assessments : assessments_[old_block];
+  ValidatePendingAssessment(old_block, op, old_block_assessments, old,
                             virtual_register);
 }
 
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
index 9a605d6..989589e 100644
--- a/src/compiler/register-allocator-verifier.h
+++ b/src/compiler/register-allocator-verifier.h
@@ -5,13 +5,14 @@
 #ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
 #define V8_REGISTER_ALLOCATOR_VERIFIER_H_
 
+#include "src/compiler/instruction.h"
 #include "src/zone/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class InstructionOperand;
+class InstructionBlock;
 class InstructionSequence;
 
 // The register allocator validator traverses instructions in the instruction
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 0ed479f..403c344 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -86,6 +86,10 @@
       return kDoubleSize;
     case MachineRepresentation::kSimd128:
       return kSimd128Size;
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
+      return kSimdMaskRegisters ? kPointerSize : kSimd128Size;
     case MachineRepresentation::kNone:
       break;
   }
@@ -2985,7 +2989,7 @@
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
   DCHECK_GE(positions.length(), num_regs);
 
-  for (int i = 0; i < num_regs; i++) {
+  for (int i = 0; i < num_regs; ++i) {
     positions[i] = LifetimePosition::MaxPosition();
   }
 
@@ -3009,9 +3013,17 @@
 
   for (LiveRange* cur_inactive : inactive_live_ranges()) {
     DCHECK(cur_inactive->End() > range->Start());
+    int cur_reg = cur_inactive->assigned_register();
+    // No need to carry out intersections when this register won't be
+    // relevant to this range anyway.
+    // TODO(mtrofin): extend to aliased ranges, too.
+    if ((kSimpleFPAliasing || !check_fp_aliasing()) &&
+        positions[cur_reg] < range->Start()) {
+      continue;
+    }
+
     LifetimePosition next_intersection = cur_inactive->FirstIntersection(range);
     if (!next_intersection.IsValid()) continue;
-    int cur_reg = cur_inactive->assigned_register();
     if (kSimpleFPAliasing || !check_fp_aliasing()) {
       positions[cur_reg] = Min(positions[cur_reg], next_intersection);
       TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
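
FirstIntersection is a linear walk over two use-interval lists, so it pays to skip it whenever the result cannot matter: positions[cur_reg] only ever decreases, so once it is already below the candidate range's start the intersection cannot change any decision. A minimal sketch of the early-out, with a hypothetical position type:

#include <algorithm>

using LifetimePosition = int;  // hypothetical stand-in
constexpr LifetimePosition kInvalid = -1;

// Placeholder for the expensive linear walk over two use-interval lists.
LifetimePosition FirstIntersection() { return kInvalid; }

void UpdateFreeUntil(LifetimePosition* positions, int cur_reg,
                     LifetimePosition range_start) {
  // Early out: the Min below can only lower positions[cur_reg], so once it
  // is already before range_start the outcome is fixed.
  if (positions[cur_reg] < range_start) return;
  LifetimePosition next = FirstIntersection();
  if (next == kInvalid) return;
  positions[cur_reg] = std::min(positions[cur_reg], next);
}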
@@ -3111,8 +3123,9 @@
   const int* codes = allocatable_register_codes();
   MachineRepresentation rep = current->representation();
   if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat32 ||
-                             rep == MachineRepresentation::kSimd128))
+                             rep == MachineRepresentation::kSimd128)) {
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
+  }
 
   DCHECK_GE(free_until_pos.length(), num_codes);
 
@@ -3166,6 +3179,9 @@
                              rep == MachineRepresentation::kSimd128))
     GetFPRegisterSet(rep, &num_regs, &num_codes, &codes);
 
+  // use_pos tracks the first position at which each register/alias is used.
+  // block_pos tracks the first position from which each register/alias is
+  // blocked.
   LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
   for (int i = 0; i < num_regs; i++) {
@@ -3181,6 +3197,8 @@
         block_pos[cur_reg] = use_pos[cur_reg] =
             LifetimePosition::GapFromInstructionIndex(0);
       } else {
+        DCHECK_NE(LifetimePosition::GapFromInstructionIndex(0),
+                  block_pos[cur_reg]);
         use_pos[cur_reg] =
             range->NextLifetimePositionRegisterIsBeneficial(current->Start());
       }
@@ -3196,7 +3214,9 @@
               LifetimePosition::GapFromInstructionIndex(0);
         } else {
           use_pos[aliased_reg] =
-              range->NextLifetimePositionRegisterIsBeneficial(current->Start());
+              Min(block_pos[aliased_reg],
+                  range->NextLifetimePositionRegisterIsBeneficial(
+                      current->Start()));
         }
       }
     }
@@ -3204,10 +3224,23 @@
 
   for (LiveRange* range : inactive_live_ranges()) {
     DCHECK(range->End() > current->Start());
-    LifetimePosition next_intersection = range->FirstIntersection(current);
-    if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     bool is_fixed = range->TopLevel()->IsFixed();
+
+    // Don't perform costly intersections if they are guaranteed not to
+    // update block_pos or use_pos.
+    // TODO(mtrofin): extend to aliased ranges, too.
+    if ((kSimpleFPAliasing || !check_fp_aliasing())) {
+      if (is_fixed) {
+        if (block_pos[cur_reg] < range->Start()) continue;
+      } else {
+        if (use_pos[cur_reg] < range->Start()) continue;
+      }
+    }
+
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+
     if (kSimpleFPAliasing || !check_fp_aliasing()) {
       if (is_fixed) {
         block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
@@ -3242,19 +3275,18 @@
     }
   }
 
-  LifetimePosition pos = use_pos[reg];
-
-  if (pos < register_use->pos()) {
+  if (use_pos[reg] < register_use->pos()) {
+    // If there is a gap position before the next register use, we can
+    // spill until there. The gap position will then fit the fill move.
     if (LifetimePosition::ExistsGapPositionBetween(current->Start(),
                                                    register_use->pos())) {
       SpillBetween(current, current->Start(), register_use->pos());
-    } else {
-      SetLiveRangeAssignedRegister(current, reg);
-      SplitAndSpillIntersecting(current);
+      return;
     }
-    return;
   }
 
+  // We couldn't spill until the next register use. Split before the register
+  // is blocked, if applicable.
   if (block_pos[reg] < current->End()) {
     // Register becomes blocked before the current range end. Split before that
     // position.
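
The restructured tail makes the decision order explicit: first try to spill the current range up to its next hard register use, which is only legal when a gap position exists where the reload move can live; failing that, split before the position where the register becomes blocked. A control-flow sketch with hypothetical placeholder helpers:

using LifetimePosition = int;  // hypothetical stand-in

struct LiveRange {
  LifetimePosition start, end;
};

// Placeholder helpers standing in for the allocator's real ones.
bool ExistsGapPositionBetween(LifetimePosition a, LifetimePosition b) {
  return b > a + 1;
}
void SpillBetween(LiveRange*, LifetimePosition, LifetimePosition) {}
void SplitBefore(LiveRange*, LifetimePosition) {}

void AllocateBlocked(LiveRange* current, int reg, LifetimePosition* use_pos,
                     LifetimePosition* block_pos,
                     LifetimePosition next_register_use) {
  if (use_pos[reg] < next_register_use &&
      ExistsGapPositionBetween(current->start, next_register_use)) {
    // Cheapest option: keep the value spilled until it is actually needed;
    // the gap position leaves room for the reload (fill) move.
    SpillBetween(current, current->start, next_register_use);
    return;
  }
  if (block_pos[reg] < current->end) {
    // The register becomes unavailable before the range ends: split there.
    SplitBefore(current, block_pos[reg]);
  }
  // ... otherwise assign reg to current and split intersecting ranges ...
}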
diff --git a/src/compiler/representation-change.cc b/src/compiler/representation-change.cc
index e3e5108..4b4f8c9 100644
--- a/src/compiler/representation-change.cc
+++ b/src/compiler/representation-change.cc
@@ -9,6 +9,8 @@
 #include "src/base/bits.h"
 #include "src/code-factory.h"
 #include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -168,9 +170,10 @@
     case MachineRepresentation::kWord64:
       DCHECK(use_info.type_check() == TypeCheckKind::kNone);
       return GetWord64RepresentationFor(node, output_rep, output_type);
-    case MachineRepresentation::kSimd128:  // Fall through.
-      // TODO(bbudge) Handle conversions between tagged and untagged.
-      break;
+    case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       return node;
   }
@@ -270,9 +273,15 @@
       return TypeError(node, output_rep, output_type,
                        MachineRepresentation::kTaggedSigned);
     }
-  } else if (CanBeTaggedPointer(output_rep) &&
-             use_info.type_check() == TypeCheckKind::kSignedSmall) {
-    op = simplified()->CheckedTaggedToTaggedSigned();
+  } else if (CanBeTaggedPointer(output_rep)) {
+    if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
+      op = simplified()->CheckedTaggedToTaggedSigned();
+    } else if (output_type->Is(Type::SignedSmall())) {
+      op = simplified()->ChangeTaggedToTaggedSigned();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTaggedSigned);
+    }
   } else if (output_rep == MachineRepresentation::kBit &&
              use_info.type_check() == TypeCheckKind::kSignedSmall) {
     // TODO(turbofan): Consider adding a Bailout operator that just deopts.
@@ -307,7 +316,12 @@
     // We just provide a dummy value here.
     return jsgraph()->TheHoleConstant();
   } else if (output_rep == MachineRepresentation::kBit) {
-    return node;
+    if (output_type->Is(Type::Boolean())) {
+      op = simplified()->ChangeBitToTagged();
+    } else {
+      return TypeError(node, output_rep, output_type,
+                       MachineRepresentation::kTagged);
+    }
   } else if (IsWord(output_rep)) {
     if (output_type->Is(Type::Unsigned32())) {
       // uint32 -> float64 -> tagged
@@ -582,33 +596,33 @@
   } else if (output_rep == MachineRepresentation::kBit) {
     return node;  // Sloppy comparison -> word32
   } else if (output_rep == MachineRepresentation::kFloat64) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
                use_info.type_check() == TypeCheckKind::kSigned32) {
       op = simplified()->CheckedFloat64ToInt32(
           output_type->Maybe(Type::MinusZero())
               ? use_info.minus_zero_check()
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kFloat32) {
     node = InsertChangeFloat32ToFloat64(node);  // float32 -> float64 -> int32
-    if (output_type->Is(Type::Unsigned32())) {
-      op = machine()->ChangeFloat64ToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = machine()->ChangeFloat64ToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      op = machine()->TruncateFloat64ToWord32();
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall ||
                use_info.type_check() == TypeCheckKind::kSigned32) {
       op = simplified()->CheckedFloat64ToInt32(
           output_type->Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = machine()->ChangeFloat64ToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      op = machine()->TruncateFloat64ToWord32();
     }
   } else if (output_rep == MachineRepresentation::kTaggedSigned) {
     if (output_type->Is(Type::Signed32())) {
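
These clauses are first-match-wins, so the reordering is behavioral: the deopting checked conversion now takes precedence over the unchecked unsigned and truncating conversions, which previously matched first for values statically typed Unsigned32 even when a SignedSmall/Signed32 check was requested. A compressed sketch of the new dispatch order, with hypothetical boolean predicates:

enum class Conv {
  kChangeToInt32,     // unchecked, value known Signed32
  kCheckedToInt32,    // checked, may deoptimize
  kChangeToUint32,    // unchecked, value known Unsigned32
  kTruncateToWord32,  // lossy truncation
  kNone
};

// Order matters: the checked conversion must be considered before the
// unsigned and truncating fallbacks, or the requested check is dropped.
Conv SelectFloat64ToWord32(bool is_signed32, bool needs_check,
                           bool is_unsigned32, bool used_as_word32) {
  if (is_signed32) return Conv::kChangeToInt32;
  if (needs_check) return Conv::kCheckedToInt32;
  if (is_unsigned32) return Conv::kChangeToUint32;
  if (used_as_word32) return Conv::kTruncateToWord32;
  return Conv::kNone;
}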
@@ -622,16 +636,8 @@
     }
   } else if (output_rep == MachineRepresentation::kTagged ||
              output_rep == MachineRepresentation::kTaggedPointer) {
-    if (output_type->Is(Type::Unsigned32())) {
-      op = simplified()->ChangeTaggedToUint32();
-    } else if (output_type->Is(Type::Signed32())) {
+    if (output_type->Is(Type::Signed32())) {
       op = simplified()->ChangeTaggedToInt32();
-    } else if (use_info.truncation().IsUsedAsWord32()) {
-      if (use_info.type_check() != TypeCheckKind::kNone) {
-        op = simplified()->CheckedTruncateTaggedToWord32();
-      } else {
-        op = simplified()->TruncateTaggedToWord32();
-      }
     } else if (use_info.type_check() == TypeCheckKind::kSignedSmall) {
       op = simplified()->CheckedTaggedSignedToInt32();
     } else if (use_info.type_check() == TypeCheckKind::kSigned32) {
@@ -639,6 +645,14 @@
           output_type->Maybe(Type::MinusZero())
               ? CheckForMinusZeroMode::kCheckForMinusZero
               : CheckForMinusZeroMode::kDontCheckForMinusZero);
+    } else if (output_type->Is(Type::Unsigned32())) {
+      op = simplified()->ChangeTaggedToUint32();
+    } else if (use_info.truncation().IsUsedAsWord32()) {
+      if (output_type->Is(Type::NumberOrOddball())) {
+        op = simplified()->TruncateTaggedToWord32();
+      } else if (use_info.type_check() != TypeCheckKind::kNone) {
+        op = simplified()->CheckedTruncateTaggedToWord32();
+      }
     }
   } else if (output_rep == MachineRepresentation::kWord32) {
     // Only the checked case should get here, the non-checked case is
@@ -689,8 +703,12 @@
   // Eagerly fold representation changes for constants.
   switch (node->opcode()) {
     case IrOpcode::kHeapConstant: {
-      Handle<HeapObject> value = OpParameter<Handle<HeapObject>>(node);
-      return jsgraph()->Int32Constant(value->BooleanValue() ? 1 : 0);
+      HeapObjectMatcher m(node);
+      if (m.Is(factory()->false_value())) {
+        return jsgraph()->Int32Constant(0);
+      } else if (m.Is(factory()->true_value())) {
+        return jsgraph()->Int32Constant(1);
+      }
     }
     default:
       break;
@@ -807,6 +825,24 @@
   }
 }
 
+const Operator* RepresentationChanger::TaggedSignedOperatorFor(
+    IrOpcode::Value opcode) {
+  switch (opcode) {
+    case IrOpcode::kSpeculativeNumberLessThan:
+      return machine()->Is32() ? machine()->Int32LessThan()
+                               : machine()->Int64LessThan();
+    case IrOpcode::kSpeculativeNumberLessThanOrEqual:
+      return machine()->Is32() ? machine()->Int32LessThanOrEqual()
+                               : machine()->Int64LessThanOrEqual();
+    case IrOpcode::kSpeculativeNumberEqual:
+      return machine()->Is32() ? machine()->Word32Equal()
+                               : machine()->Word64Equal();
+    default:
+      UNREACHABLE();
+      return nullptr;
+  }
+}
+
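
Lowering speculative number comparisons on tagged-signed inputs straight to machine word comparisons is sound because Smi tagging is a multiplication by a power of two, which preserves order and equality. A tiny self-contained check of that invariant, assuming the hypothetical one-bit tag of 32-bit targets:

#include <cassert>
#include <cstdint>

// Hypothetical 32-bit Smi convention: payload in the upper 31 bits, tag 0 in
// the lowest bit; written as * 2 to stay well-defined for negative values.
constexpr int32_t SmiTag(int32_t value) { return value * 2; }

int main() {
  // Tagging preserves order, so the comparison can run on tagged words.
  assert((SmiTag(-5) < SmiTag(3)) == (-5 < 3));
  assert((SmiTag(7) == SmiTag(7)) && !(SmiTag(7) == SmiTag(8)));
}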
 const Operator* RepresentationChanger::Uint32OperatorFor(
     IrOpcode::Value opcode) {
   switch (opcode) {
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
index d7895da..4fa7d91 100644
--- a/src/compiler/representation-change.h
+++ b/src/compiler/representation-change.h
@@ -238,6 +238,7 @@
                              UseInfo use_info);
   const Operator* Int32OperatorFor(IrOpcode::Value opcode);
   const Operator* Int32OverflowOperatorFor(IrOpcode::Value opcode);
+  const Operator* TaggedSignedOperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OperatorFor(IrOpcode::Value opcode);
   const Operator* Uint32OverflowOperatorFor(IrOpcode::Value opcode);
   const Operator* Float64OperatorFor(IrOpcode::Value opcode);
diff --git a/src/compiler/s390/code-generator-s390.cc b/src/compiler/s390/code-generator-s390.cc
index 5dcc82f..8e9db3d 100644
--- a/src/compiler/s390/code-generator-s390.cc
+++ b/src/compiler/s390/code-generator-s390.cc
@@ -119,12 +119,30 @@
     InstructionOperand* op = instr_->InputAt(index);
     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
   }
+
+  MemOperand InputStackSlot32(size_t index) {
+#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
+    // We want to read the 32 bits directly from memory.
+    MemOperand mem = InputStackSlot(index);
+    return MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
+#else
+    return InputStackSlot(index);
+#endif
+  }
 };
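
InputStackSlot32 papers over an endianness hazard: on a big-endian 64-bit target, the low 32 bits of a value stored through a 64-bit slot sit 4 bytes into the slot, so a 32-bit read must bias the displacement, while on little-endian targets offset 0 is already correct. A standalone illustration of the bias computation:

#include <cstdint>
#include <cstring>

// Offset of the low 32 bits within an 8-byte slot, as InputStackSlot32
// effectively computes it: +4 on big-endian, +0 on little-endian.
int Low32OffsetInSlot() {
  const uint64_t probe = 1;
  unsigned char bytes[8];
  std::memcpy(bytes, &probe, sizeof(bytes));
  return bytes[0] == 1 ? 0 : 4;  // first byte holds the 1 iff little-endian
}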
 
+static inline bool HasRegisterOutput(Instruction* instr, int index = 0) {
+  return instr->OutputCount() > 0 && instr->OutputAt(index)->IsRegister();
+}
+
 static inline bool HasRegisterInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsRegister();
 }
 
+static inline bool HasFPRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsFPRegister();
+}
+
 static inline bool HasImmediateInput(Instruction* instr, size_t index) {
   return instr->InputAt(index)->IsImmediate();
 }
@@ -133,6 +151,10 @@
   return instr->InputAt(index)->IsStackSlot();
 }
 
+static inline bool HasFPStackSlotInput(Instruction* instr, size_t index) {
+  return instr->InputAt(index)->IsFPStackSlot();
+}
+
 namespace {
 
 class OutOfLineLoadNAN32 final : public OutOfLineCode {
@@ -250,17 +272,33 @@
       return eq;
     case kNotEqual:
       return ne;
-    case kSignedLessThan:
     case kUnsignedLessThan:
+      // an unsigned number is never less than 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_NOP;
+    // fall through
+    case kSignedLessThan:
       return lt;
-    case kSignedGreaterThanOrEqual:
     case kUnsignedGreaterThanOrEqual:
+      // an unsigned number is always greater than or equal to 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_ALWAYS;
+    // fall through
+    case kSignedGreaterThanOrEqual:
       return ge;
-    case kSignedLessThanOrEqual:
     case kUnsignedLessThanOrEqual:
+      // an unsigned number is never less than 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return CC_EQ;
+    // fall through
+    case kSignedLessThanOrEqual:
       return le;
-    case kSignedGreaterThan:
     case kUnsignedGreaterThan:
+      // an unsigned number is always greater than or equal to 0
+      if (op == kS390_LoadAndTestWord32 || op == kS390_LoadAndTestWord64)
+        return ne;
+    // fall through
+    case kSignedGreaterThan:
       return gt;
     case kOverflow:
       // Overflow checked for AddP/SubP only.
@@ -292,8 +330,176 @@
   return kNoCondition;
 }
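
For the load-and-test opcodes the second comparand is implicitly zero, so the unsigned conditions collapse to constants or equality tests: u < 0 is never true, u >= 0 is always true, u <= 0 holds iff u == 0, and u > 0 holds iff u != 0. The folding above, restated as a standalone sketch:

enum class Cond { kNever, kAlways, kEq, kNe, kLt, kGe, kLe, kGt };

// Folds an unsigned comparison against an implicit zero operand, as the
// load-and-test cases do; every other condition maps through unchanged.
Cond FoldUnsignedCompareWithZero(Cond c) {
  switch (c) {
    case Cond::kLt: return Cond::kNever;   // u < 0 : impossible
    case Cond::kGe: return Cond::kAlways;  // u >= 0: trivially true
    case Cond::kLe: return Cond::kEq;      // u <= 0 <=> u == 0
    case Cond::kGt: return Cond::kNe;      // u > 0  <=> u != 0
    default: return c;
  }
}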
 
+typedef void (MacroAssembler::*RRTypeInstr)(Register, Register);
+typedef void (MacroAssembler::*RMTypeInstr)(Register, const MemOperand&);
+typedef void (MacroAssembler::*RITypeInstr)(Register, const Operand&);
+typedef void (MacroAssembler::*RRRTypeInstr)(Register, Register, Register);
+typedef void (MacroAssembler::*RRMTypeInstr)(Register, Register,
+                                             const MemOperand&);
+typedef void (MacroAssembler::*RRITypeInstr)(Register, Register,
+                                             const Operand&);
+
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                   \
+  {                                                                      \
+    CHECK(HasImmediateInput(instr, (num)));                              \
+    int doZeroExt = i.InputInt32(num);                                   \
+    if (doZeroExt) masm->LoadlW(i.OutputRegister(), i.OutputRegister()); \
+  }
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRTypeInstr rr_instr,
+                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+  CHECK(i.OutputRegister().is(i.InputRegister(0)));
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RMTypeInstr rm_instr, RITypeInstr ri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RMTypeInstr rm_instr, RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rm_instr != NULL);
+    (masm->*rm_instr)(i.OutputRegister(), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    CHECK(i.OutputRegister().is(i.InputRegister(0)));
+    (masm->*rm_instr)(i.OutputRegister(), i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RRMTypeInstr rrm_instr, RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  int zeroExtIndex = 2;
+  if (mode != kMode_None) {
+    size_t first_index = 1;
+    MemOperand operand = i.MemoryOperand(&mode, &first_index);
+    zeroExtIndex = first_index;
+    CHECK(rrm_instr != NULL);
+    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0), operand);
+  } else if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else if (HasStackSlotInput(instr, 1)) {
+    (masm->*rrm_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputStackSlot32(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRRTypeInstr rrr_instr,
+                   RRITypeInstr rri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  CHECK(mode == kMode_None);
+  int zeroExtIndex = 2;
+  if (HasRegisterInput(instr, 1)) {
+    (masm->*rrr_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*rri_instr)(i.OutputRegister(), i.InputRegister(0),
+                       i.InputImmediate(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+void AssembleBinOp(S390OperandConverter& i, MacroAssembler* masm,
+                   Instruction* instr, RRTypeInstr rr_instr,
+                   RITypeInstr ri_instr) {
+  AddressingMode mode = AddressingModeField::decode(instr->opcode());
+  CHECK(mode == kMode_None);
+  CHECK(i.OutputRegister().is(i.InputRegister(0)));
+  int zeroExtIndex = 2;
+  if (HasRegisterInput(instr, 1)) {
+    (masm->*rr_instr)(i.OutputRegister(), i.InputRegister(1));
+  } else if (HasImmediateInput(instr, 1)) {
+    (masm->*ri_instr)(i.OutputRegister(), i.InputImmediate(1));
+  } else {
+    UNREACHABLE();
+  }
+  CHECK_AND_ZERO_EXT_OUTPUT(zeroExtIndex);
+}
+
+#define ASSEMBLE_BIN_OP(instr1, instr2, instr3)            \
+  AssembleBinOp(i, masm(), instr, &MacroAssembler::instr1, \
+                &MacroAssembler::instr2, &MacroAssembler::instr3)
+
+#undef CHECK_AND_ZERO_EXT_OUTPUT
+
 }  // namespace
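
All AssembleBinOp variants share one calling convention: the instruction selector appends a trailing immediate operand (found at zeroExtIndex) that says whether the 32-bit result must be zero-extended to 64 bits, replacing the unconditional LoadlW/LoadW the old per-opcode code emitted after every 32-bit operation. A sketch of the protocol, with a hypothetical operand record:

#include <cstdint>

struct Operands {
  int32_t do_zero_ext;  // trailing immediate set during instruction selection
};

// Mirrors CHECK_AND_ZERO_EXT_OUTPUT: clear the upper 32 bits only when the
// selector asked for it, instead of unconditionally after every 32-bit op.
uint64_t MaybeZeroExtend(uint64_t result, const Operands& ops) {
  if (ops.do_zero_ext) result &= 0xFFFFFFFFu;
  return result;
}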
 
+#define CHECK_AND_ZERO_EXT_OUTPUT(num)                                \
+  {                                                                   \
+    CHECK(HasImmediateInput(instr, (num)));                           \
+    int doZeroExt = i.InputInt32(num);                                \
+    if (doZeroExt) __ LoadlW(i.OutputRegister(), i.OutputRegister()); \
+  }
+
 #define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
   do {                                                                \
     __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
@@ -318,26 +524,92 @@
     }                                                      \
   } while (0)
 
-#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
-  do {                                                          \
-    if (HasRegisterInput(instr, 1)) {                           \
-      if (i.CompareLogical()) {                                 \
-        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
-      } else {                                                  \
-        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
-      }                                                         \
-    } else {                                                    \
-      if (i.CompareLogical()) {                                 \
-        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
-      } else {                                                  \
-        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
-      }                                                         \
-    }                                                           \
+#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                         \
+  do {                                                                  \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 1;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), operand);                     \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), operand);                      \
+      }                                                                 \
+    } else if (HasRegisterInput(instr, 1)) {                            \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
+      }                                                                 \
+    } else if (HasImmediateInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
+      }                                                                 \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 1));                              \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputStackSlot(1));          \
+      }                                                                 \
+    }                                                                   \
   } while (0)
 
-#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                            \
-  do {                                                               \
-    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
+#define ASSEMBLE_COMPARE32(cmp_instr, cmpl_instr)                       \
+  do {                                                                  \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 1;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), operand);                     \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), operand);                      \
+      }                                                                 \
+    } else if (HasRegisterInput(instr, 1)) {                            \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));          \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));           \
+      }                                                                 \
+    } else if (HasImmediateInput(instr, 1)) {                           \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1));         \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));          \
+      }                                                                 \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 1));                              \
+      if (i.CompareLogical()) {                                         \
+        __ cmpl_instr(i.InputRegister(0), i.InputStackSlot32(1));       \
+      } else {                                                          \
+        __ cmp_instr(i.InputRegister(0), i.InputStackSlot32(1));        \
+      }                                                                 \
+    }                                                                   \
+  } while (0)
+
+#define ASSEMBLE_FLOAT_COMPARE(cmp_rr_instr, cmp_rm_instr, load_instr)     \
+  do {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode());    \
+    if (mode != kMode_None) {                                              \
+      size_t first_index = 1;                                              \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);           \
+      __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                  \
+    } else if (HasFPRegisterInput(instr, 1)) {                             \
+      __ cmp_rr_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); \
+    } else {                                                               \
+      USE(HasFPStackSlotInput);                                            \
+      DCHECK(HasFPStackSlotInput(instr, 1));                               \
+      MemOperand operand = i.InputStackSlot(1);                            \
+      if (operand.offset() >= 0) {                                         \
+        __ cmp_rm_instr(i.InputDoubleRegister(0), operand);                \
+      } else {                                                             \
+        __ load_instr(kScratchDoubleReg, operand);                         \
+        __ cmp_rr_instr(i.InputDoubleRegister(0), kScratchDoubleReg);      \
+      }                                                                    \
+    }                                                                      \
   } while (0)
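
The offset test in the stack-slot path exists because, to the editor's understanding, the memory forms of these float compares (ceb/cdb) encode only a non-negative displacement; a slot at a negative frame offset is therefore first loaded into kScratchDoubleReg and compared register-to-register. The dispatch, sketched with hypothetical emit helpers:

struct MemOp { long offset; };

// Hypothetical placeholders for the three emit paths in the macro above.
void EmitCompareRegMem(const MemOp&) {}  // memory form: displacement >= 0 only
void EmitLoadToScratch(const MemOp&) {}  // load form tolerates negative offsets
void EmitCompareRegReg() {}

void EmitFloatCompareToStackSlot(const MemOp& operand) {
  if (operand.offset >= 0) {
    EmitCompareRegMem(operand);  // compare directly against memory
  } else {
    EmitLoadToScratch(operand);  // materialize into the scratch FP register
    EmitCompareRegReg();         // then compare register-to-register
  }
}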
 
 // Divide instruction dr will implicitly use the register pair
@@ -349,7 +621,7 @@
     __ LoadRR(r0, i.InputRegister(0));          \
     __ shift_instr(r0, Operand(32));            \
     __ div_instr(r0, i.InputRegister(1));       \
-    __ ltr(i.OutputRegister(), r0);             \
+    __ LoadlW(i.OutputRegister(), r0);          \
   } while (0)
 
 #define ASSEMBLE_FLOAT_MODULO()                                               \
@@ -569,6 +841,7 @@
     }                                                                  \
     __ bind(&done);                                                    \
   } while (0)
 // Only the MRI mode is available for these instructions
 #define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
   do {                                                \
@@ -586,6 +859,38 @@
     __ asm_instr(result, operand);               \
   } while (0)
 
+#define ASSEMBLE_LOADANDTEST64(asm_instr_rr, asm_instr_rm)              \
+  {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 0;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      __ asm_instr_rm(dst, operand);                                    \
+    } else if (HasRegisterInput(instr, 0)) {                            \
+      __ asm_instr_rr(dst, i.InputRegister(0));                         \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 0));                              \
+      __ asm_instr_rm(dst, i.InputStackSlot(0));                        \
+    }                                                                   \
+  }
+
+#define ASSEMBLE_LOADANDTEST32(asm_instr_rr, asm_instr_rm)              \
+  {                                                                     \
+    AddressingMode mode = AddressingModeField::decode(instr->opcode()); \
+    Register dst = HasRegisterOutput(instr) ? i.OutputRegister() : r0;  \
+    if (mode != kMode_None) {                                           \
+      size_t first_index = 0;                                           \
+      MemOperand operand = i.MemoryOperand(&mode, &first_index);        \
+      __ asm_instr_rm(dst, operand);                                    \
+    } else if (HasRegisterInput(instr, 0)) {                            \
+      __ asm_instr_rr(dst, i.InputRegister(0));                         \
+    } else {                                                            \
+      DCHECK(HasStackSlotInput(instr, 0));                              \
+      __ asm_instr_rm(dst, i.InputStackSlot32(0));                      \
+    }                                                                   \
+  }
+
 #define ASSEMBLE_STORE_FLOAT32()                         \
   do {                                                   \
     size_t index = 0;                                    \
@@ -729,7 +1034,8 @@
 
   // Check if current frame is an arguments adaptor frame.
   __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ CmpP(scratch1,
+          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ bne(&done);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -984,10 +1290,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1048,35 +1352,43 @@
       break;
     }
     case kS390_And32:
-      ASSEMBLE_BINOP(And);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(nrk, And, nilf);
+      } else {
+        ASSEMBLE_BIN_OP(nr, And, nilf);
+      }
       break;
     case kS390_And64:
       ASSEMBLE_BINOP(AndP);
       break;
     case kS390_Or32:
-      ASSEMBLE_BINOP(Or);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(ork, Or, oilf);
+      } else {
+        ASSEMBLE_BIN_OP(or_z, Or, oilf);
+      }
+      break;
     case kS390_Or64:
       ASSEMBLE_BINOP(OrP);
       break;
     case kS390_Xor32:
-      ASSEMBLE_BINOP(Xor);
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(xrk, Xor, xilf);
+      } else {
+        ASSEMBLE_BIN_OP(xr, Xor, xilf);
+      }
       break;
     case kS390_Xor64:
       ASSEMBLE_BINOP(XorP);
       break;
     case kS390_ShiftLeft32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftLeft);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::ShiftLeft,
+                      &MacroAssembler::ShiftLeft);
       } else {
-        ASSEMBLE_BINOP(ShiftLeft);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::sll,
+                      &MacroAssembler::sll);
       }
-      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftLeft64:
@@ -1084,18 +1396,13 @@
       break;
 #endif
     case kS390_ShiftRight32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftRight);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srlk,
+                      &MacroAssembler::srlk);
       } else {
-        ASSEMBLE_BINOP(ShiftRight);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srl,
+                      &MacroAssembler::srl);
       }
-      __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRight64:
@@ -1103,19 +1410,13 @@
       break;
 #endif
     case kS390_ShiftRightArith32:
-      if (HasRegisterInput(instr, 1)) {
-        if (i.OutputRegister().is(i.InputRegister(1)) &&
-            !CpuFeatures::IsSupported(DISTINCT_OPS)) {
-          __ LoadRR(kScratchReg, i.InputRegister(1));
-          __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
-                             kScratchReg);
-        } else {
-          ASSEMBLE_BINOP(ShiftRightArith);
-        }
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::srak,
+                      &MacroAssembler::srak);
       } else {
-        ASSEMBLE_BINOP(ShiftRightArith);
+        AssembleBinOp(i, masm(), instr, &MacroAssembler::sra,
+                      &MacroAssembler::sra);
       }
-      __ LoadlW(i.OutputRegister(), i.OutputRegister());
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ShiftRightArith64:
@@ -1197,7 +1498,7 @@
       break;
     }
 #endif
-    case kS390_RotRight32:
+    case kS390_RotRight32: {
       if (HasRegisterInput(instr, 1)) {
         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
         __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
@@ -1205,7 +1506,9 @@
         __ rll(i.OutputRegister(), i.InputRegister(0),
                Operand(32 - i.InputInt32(1)));
       }
+      CHECK_AND_ZERO_EXT_OUTPUT(2);
       break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_RotRight64:
       if (HasRegisterInput(instr, 1)) {
@@ -1216,33 +1519,6 @@
                 Operand(64 - i.InputInt32(1)));
       }
       break;
-#endif
-    case kS390_Not32:
-      __ Not32(i.OutputRegister(), i.InputRegister(0));
-      break;
-    case kS390_Not64:
-      __ Not64(i.OutputRegister(), i.InputRegister(0));
-      break;
-    case kS390_RotLeftAndMask32:
-      if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
-        int shiftAmount = i.InputInt32(1);
-        int endBit = 63 - i.InputInt32(3);
-        int startBit = 63 - i.InputInt32(2);
-        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
-        __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
-                 Operand(endBit), Operand::Zero(), true);
-      } else {
-        int shiftAmount = i.InputInt32(1);
-        int clearBitLeft = 63 - i.InputInt32(2);
-        int clearBitRight = i.InputInt32(3);
-        __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
-        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
-        __ srlg(i.OutputRegister(), i.OutputRegister(),
-                Operand((clearBitLeft + clearBitRight)));
-        __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
-      }
-      break;
-#if V8_TARGET_ARCH_S390X
     case kS390_RotLeftAndClear64:
       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
         int shiftAmount = i.InputInt32(1);
@@ -1291,10 +1567,14 @@
       }
       break;
 #endif
-    case kS390_Add32:
-      ASSEMBLE_BINOP(Add32);
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+    case kS390_Add32: {
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(ark, Add32, Add32_RRI);
+      } else {
+        ASSEMBLE_BIN_OP(ar, Add32, Add32_RI);
+      }
       break;
+    }
     case kS390_Add64:
       ASSEMBLE_BINOP(AddP);
       break;
@@ -1319,8 +1599,11 @@
       }
       break;
     case kS390_Sub32:
-      ASSEMBLE_BINOP(Sub32);
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+      if (CpuFeatures::IsSupported(DISTINCT_OPS)) {
+        ASSEMBLE_BIN_OP(srk, Sub32, Sub32_RRI);
+      } else {
+        ASSEMBLE_BIN_OP(sr, Sub32, Sub32_RI);
+      }
       break;
     case kS390_Sub64:
       ASSEMBLE_BINOP(SubP);
@@ -1352,26 +1635,15 @@
       }
       break;
     case kS390_Mul32:
-      if (HasRegisterInput(instr, 1)) {
-        __ Mul32(i.InputRegister(0), i.InputRegister(1));
-      } else if (HasImmediateInput(instr, 1)) {
-        __ Mul32(i.InputRegister(0), i.InputImmediate(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // msy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ Mul32(i.InputRegister(0), kScratchReg);
-#else
-        __ Mul32(i.InputRegister(0), i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
+      ASSEMBLE_BIN_OP(Mul32, Mul32, Mul32);
+      break;
+    case kS390_Mul32WithOverflow:
+      ASSEMBLE_BIN_OP(Mul32WithOverflowIfCCUnequal,
+                      Mul32WithOverflowIfCCUnequal,
+                      Mul32WithOverflowIfCCUnequal);
       break;
     case kS390_Mul64:
+      CHECK(i.OutputRegister().is(i.InputRegister(0)));
       if (HasRegisterInput(instr, 1)) {
         __ Mul64(i.InputRegister(0), i.InputRegister(1));
       } else if (HasImmediateInput(instr, 1)) {
@@ -1383,50 +1655,10 @@
       }
       break;
     case kS390_MulHigh32:
-      __ LoadRR(r1, i.InputRegister(0));
-      if (HasRegisterInput(instr, 1)) {
-        __ mr_z(r0, i.InputRegister(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // mfy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ mr_z(r0, kScratchReg);
-#else
-        __ mfy(r0, i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
-      __ LoadW(i.OutputRegister(), r0);
-      break;
-    case kS390_Mul32WithHigh32:
-      __ LoadRR(r1, i.InputRegister(0));
-      __ mr_z(r0, i.InputRegister(1));
-      __ LoadW(i.OutputRegister(0), r1);  // low
-      __ LoadW(i.OutputRegister(1), r0);  // high
+      ASSEMBLE_BIN_OP(MulHigh32, MulHigh32, MulHigh32);
       break;
     case kS390_MulHighU32:
-      __ LoadRR(r1, i.InputRegister(0));
-      if (HasRegisterInput(instr, 1)) {
-        __ mlr(r0, i.InputRegister(1));
-      } else if (HasStackSlotInput(instr, 1)) {
-#ifdef V8_TARGET_ARCH_S390X
-        // Avoid endian-issue here:
-        // stg r1, 0(fp)
-        // ...
-        // mfy r2, 0(fp) <-- This will read the upper 32 bits
-        __ lg(kScratchReg, i.InputStackSlot(1));
-        __ mlr(r0, kScratchReg);
-#else
-        __ ml(r0, i.InputStackSlot(1));
-#endif
-      } else {
-        UNIMPLEMENTED();
-      }
-      __ LoadlW(i.OutputRegister(), r0);
+      ASSEMBLE_BIN_OP(MulHighU32, MulHighU32, MulHighU32);
       break;
     case kS390_MulFloat:
       // Ensure we don't clobber right
@@ -1455,13 +1687,10 @@
       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
       break;
 #endif
-    case kS390_Div32:
-      __ LoadRR(r0, i.InputRegister(0));
-      __ srda(r0, Operand(32));
-      __ dr(r0, i.InputRegister(1));
-      __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
-                                r1);  // Copy R1: Quotient to output
+    case kS390_Div32: {
+      ASSEMBLE_BIN_OP(Div32, Div32, Div32);
       break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_DivU64:
       __ LoadRR(r1, i.InputRegister(0));
@@ -1470,14 +1699,10 @@
       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
       break;
 #endif
-    case kS390_DivU32:
-      __ LoadRR(r0, i.InputRegister(0));
-      __ srdl(r0, Operand(32));
-      __ dlr(r0, i.InputRegister(1));  // R0:R1: Dividend
-      __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
-      __ LoadAndTestP_ExtendSrc(r1, r1);
+    case kS390_DivU32: {
+      ASSEMBLE_BIN_OP(DivU32, DivU32, DivU32);
       break;
-
+    }
     case kS390_DivFloat:
       // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
@@ -1503,10 +1728,10 @@
       }
       break;
     case kS390_Mod32:
-      ASSEMBLE_MODULO(dr, srda);
+      ASSEMBLE_BIN_OP(Mod32, Mod32, Mod32);
       break;
     case kS390_ModU32:
-      ASSEMBLE_MODULO(dlr, srdl);
+      ASSEMBLE_BIN_OP(ModU32, ModU32, ModU32);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_Mod64:
@@ -1611,7 +1836,7 @@
     }
     case kS390_Neg32:
       __ lcr(i.OutputRegister(), i.InputRegister(0));
-      __ LoadW(i.OutputRegister(), i.OutputRegister());
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
     case kS390_Neg64:
       __ lcgr(i.OutputRegister(), i.InputRegister(0));
@@ -1659,14 +1884,16 @@
     case kS390_Cntlz32: {
       __ llgfr(i.OutputRegister(), i.InputRegister(0));
       __ flogr(r0, i.OutputRegister());
-      __ LoadRR(i.OutputRegister(), r0);
-      __ SubP(i.OutputRegister(), Operand(32));
-    } break;
+      __ Add32(i.OutputRegister(), r0, Operand(-32));
+      // No need to zero-extend because llgfr has already done it
+      break;
+    }
 #if V8_TARGET_ARCH_S390X
     case kS390_Cntlz64: {
       __ flogr(r0, i.InputRegister(0));
       __ LoadRR(i.OutputRegister(), r0);
-    } break;
+      break;
+    }
 #endif
     case kS390_Popcnt32:
       __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
@@ -1677,7 +1904,7 @@
       break;
 #endif
     case kS390_Cmp32:
-      ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
+      ASSEMBLE_COMPARE32(Cmp32, CmpLogical32);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_Cmp64:
@@ -1685,28 +1912,38 @@
       break;
 #endif
     case kS390_CmpFloat:
-      __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_FLOAT_COMPARE(cebr, ceb, ley);
       break;
     case kS390_CmpDouble:
-      __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      ASSEMBLE_FLOAT_COMPARE(cdbr, cdb, ldy);
       break;
     case kS390_Tst32:
       if (HasRegisterInput(instr, 1)) {
-        __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
+        __ And(r0, i.InputRegister(0), i.InputRegister(1));
       } else {
-        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+        Operand opnd = i.InputImmediate(1);
+        if (is_uint16(opnd.immediate())) {
+          __ tmll(i.InputRegister(0), opnd);
+        } else {
+          __ lr(r0, i.InputRegister(0));
+          __ nilf(r0, opnd);
+        }
       }
-      __ LoadAndTestP_ExtendSrc(r0, r0);
       break;
-#if V8_TARGET_ARCH_S390X
     case kS390_Tst64:
       if (HasRegisterInput(instr, 1)) {
         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
       } else {
-        __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
+        Operand opnd = i.InputImmediate(1);
+        if (is_uint16(opnd.immediate())) {
+          __ tmll(i.InputRegister(0), opnd);
+        } else {
+          __ AndP(r0, i.InputRegister(0), opnd);
+        }
       }
       break;
-#endif
     case kS390_Float64SilenceNaN: {
       DoubleRegister value = i.InputDoubleRegister(0);
       DoubleRegister result = i.OutputDoubleRegister();
@@ -1758,18 +1995,12 @@
       break;
     }
     case kS390_ExtendSignWord8:
-#if V8_TARGET_ARCH_S390X
-      __ lgbr(i.OutputRegister(), i.InputRegister(0));
-#else
       __ lbr(i.OutputRegister(), i.InputRegister(0));
-#endif
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
     case kS390_ExtendSignWord16:
-#if V8_TARGET_ARCH_S390X
-      __ lghr(i.OutputRegister(), i.InputRegister(0));
-#else
       __ lhr(i.OutputRegister(), i.InputRegister(0));
-#endif
+      CHECK_AND_ZERO_EXT_OUTPUT(1);
       break;
 #if V8_TARGET_ARCH_S390X
     case kS390_ExtendSignWord32:
@@ -2005,6 +2236,14 @@
       ASSEMBLE_LOAD_INTEGER(lg);
       break;
 #endif
+    case kS390_LoadAndTestWord32: {
+      ASSEMBLE_LOADANDTEST32(ltr, lt_z);
+      break;
+    }
+    case kS390_LoadAndTestWord64: {
+      ASSEMBLE_LOADANDTEST64(ltgr, ltg);
+      break;
+    }
     case kS390_LoadFloat32:
       ASSEMBLE_LOAD_FLOAT(LoadFloat32);
       break;
@@ -2040,6 +2279,9 @@
     case kS390_StoreDouble:
       ASSEMBLE_STORE_DOUBLE();
       break;
+    case kS390_Lay:
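+      // 'lay' (load address) materializes the effective address of the
+      // memory operand without accessing memory.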
+      __ lay(i.OutputRegister(), i.MemoryOperand());
+      break;
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
 #if V8_TARGET_ARCH_S390X
@@ -2152,6 +2394,84 @@
   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      S390OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        // We use the context register as the scratch register, because we do
+        // not have a context here.
+        __ PrepareCallCFunction(0, 0, cp);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+
+  ArchOpcode op = instr->arch_opcode();
+  Condition cond = FlagsConditionToCondition(condition, op);
+  if (op == kS390_CmpDouble) {
+    // Check for unordered (NaN) operands where necessary.
+    if (cond == le) {
+      __ bunordered(&end);
+      // Unnecessary for eq/lt since only FU bit will be set.
+    } else if (cond == gt) {
+      __ bunordered(tlabel);
+      // Unnecessary for ne/ge since only FU bit will be set.
+    }
+  }
+  __ b(cond, tlabel);
+  __ bind(&end);
+}
+
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                         FlagsCondition condition) {
@@ -2210,16 +2530,19 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   // TODO(turbofan): We should be able to generate better code by sharing the
   // actual final call site and just bl'ing to it here, similar to what we do
   // in the lithium backend.
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2377,25 +2700,22 @@
       switch (src.type()) {
         case Constant::kInt32:
 #if V8_TARGET_ARCH_S390X
-          if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
 #else
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+          if (RelocInfo::IsWasmReference(src.rmode())) {
 #endif
             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
           } else {
-            __ mov(dst, Operand(src.ToInt32()));
+            __ Load(dst, Operand(src.ToInt32()));
           }
           break;
         case Constant::kInt64:
 #if V8_TARGET_ARCH_S390X
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
-            __ mov(dst, Operand(src.ToInt64()));
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
+            __ Load(dst, Operand(src.ToInt64()));
           }
 #else
           __ mov(dst, Operand(src.ToInt64()));
diff --git a/src/compiler/s390/instruction-codes-s390.h b/src/compiler/s390/instruction-codes-s390.h
index 80e1532..b99e79f 100644
--- a/src/compiler/s390/instruction-codes-s390.h
+++ b/src/compiler/s390/instruction-codes-s390.h
@@ -31,10 +31,10 @@
   V(S390_RotRight64)               \
   V(S390_Not32)                    \
   V(S390_Not64)                    \
-  V(S390_RotLeftAndMask32)         \
   V(S390_RotLeftAndClear64)        \
   V(S390_RotLeftAndClearLeft64)    \
   V(S390_RotLeftAndClearRight64)   \
+  V(S390_Lay)                      \
   V(S390_Add32)                    \
   V(S390_Add64)                    \
   V(S390_AddPair)                  \
@@ -47,7 +47,7 @@
   V(S390_SubPair)                  \
   V(S390_MulPair)                  \
   V(S390_Mul32)                    \
-  V(S390_Mul32WithHigh32)          \
+  V(S390_Mul32WithOverflow)        \
   V(S390_Mul64)                    \
   V(S390_MulHigh32)                \
   V(S390_MulHighU32)               \
@@ -135,6 +135,10 @@
   V(S390_LoadWordU16)              \
   V(S390_LoadWordS32)              \
   V(S390_LoadWordU32)              \
+  V(S390_LoadAndTestWord32)        \
+  V(S390_LoadAndTestWord64)        \
+  V(S390_LoadAndTestFloat32)       \
+  V(S390_LoadAndTestFloat64)       \
   V(S390_LoadReverse16RR)          \
   V(S390_LoadReverse32RR)          \
   V(S390_LoadReverse64RR)          \
diff --git a/src/compiler/s390/instruction-scheduler-s390.cc b/src/compiler/s390/instruction-scheduler-s390.cc
index 5ebe489..d6ec3de 100644
--- a/src/compiler/s390/instruction-scheduler-s390.cc
+++ b/src/compiler/s390/instruction-scheduler-s390.cc
@@ -32,10 +32,10 @@
     case kS390_RotRight64:
     case kS390_Not32:
     case kS390_Not64:
-    case kS390_RotLeftAndMask32:
     case kS390_RotLeftAndClear64:
     case kS390_RotLeftAndClearLeft64:
     case kS390_RotLeftAndClearRight64:
+    case kS390_Lay:
     case kS390_Add32:
     case kS390_Add64:
     case kS390_AddPair:
@@ -48,7 +48,7 @@
     case kS390_SubFloat:
     case kS390_SubDouble:
     case kS390_Mul32:
-    case kS390_Mul32WithHigh32:
+    case kS390_Mul32WithOverflow:
     case kS390_Mul64:
     case kS390_MulHigh32:
     case kS390_MulHighU32:
@@ -130,6 +130,10 @@
     case kS390_LoadReverse16RR:
     case kS390_LoadReverse32RR:
     case kS390_LoadReverse64RR:
+    case kS390_LoadAndTestWord32:
+    case kS390_LoadAndTestWord64:
+    case kS390_LoadAndTestFloat32:
+    case kS390_LoadAndTestFloat64:
       return kNoOpcodeFlags;
 
     case kS390_LoadWordS8:
diff --git a/src/compiler/s390/instruction-selector-s390.cc b/src/compiler/s390/instruction-selector-s390.cc
index eed08a9..e591d3c 100644
--- a/src/compiler/s390/instruction-selector-s390.cc
+++ b/src/compiler/s390/instruction-selector-s390.cc
@@ -12,29 +12,85 @@
 namespace internal {
 namespace compiler {
 
-enum ImmediateMode {
-  kShift32Imm,
-  kShift64Imm,
-  kInt32Imm,
-  kInt32Imm_Negate,
-  kUint32Imm,
-  kInt20Imm,
-  kNoImmediate
+enum class OperandMode : uint32_t {
+  kNone = 0u,
+  // Immediate mode
+  kShift32Imm = 1u << 0,
+  kShift64Imm = 1u << 1,
+  kInt32Imm = 1u << 2,
+  kInt32Imm_Negate = 1u << 3,
+  kUint32Imm = 1u << 4,
+  kInt20Imm = 1u << 5,
+  kUint12Imm = 1u << 6,
+  // Instr format
+  kAllowRRR = 1u << 7,
+  kAllowRM = 1u << 8,
+  kAllowRI = 1u << 9,
+  kAllowRRI = 1u << 10,
+  kAllowRRM = 1u << 11,
+  // Useful combinations
+  kAllowImmediate = kAllowRI | kAllowRRI,
+  kAllowMemoryOperand = kAllowRM | kAllowRRM,
+  kAllowDistinctOps = kAllowRRR | kAllowRRI | kAllowRRM,
+  kBitWiseCommonMode = kAllowRI,
+  kArithmeticCommonMode = kAllowRM | kAllowRI
 };
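+// Example: MulOperandMode below expands to
+//   OperandMode::kAllowRM | OperandMode::kAllowRI | OperandMode::kInt32Imm,
+// i.e. the right operand may come from memory or be a signed 32-bit
+// immediate, in addition to the plain register form.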
 
+typedef base::Flags<OperandMode, uint32_t> OperandModes;
+DEFINE_OPERATORS_FOR_FLAGS(OperandModes);
+OperandModes immediateModeMask =
+    OperandMode::kShift32Imm | OperandMode::kShift64Imm |
+    OperandMode::kInt32Imm | OperandMode::kInt32Imm_Negate |
+    OperandMode::kUint32Imm | OperandMode::kInt20Imm;
+
+#define AndOperandMode                                              \
+  ((OperandMode::kBitWiseCommonMode | OperandMode::kUint32Imm |     \
+    OperandMode::kAllowRM | (CpuFeatures::IsSupported(DISTINCT_OPS) \
+                                 ? OperandMode::kAllowRRR           \
+                                 : OperandMode::kBitWiseCommonMode)))
+
+#define OrOperandMode AndOperandMode
+#define XorOperandMode AndOperandMode
+
+#define ShiftOperandMode                                         \
+  ((OperandMode::kBitWiseCommonMode | OperandMode::kShift64Imm | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                      \
+         ? OperandMode::kAllowRRR                                \
+         : OperandMode::kBitWiseCommonMode)))
+
+#define AddOperandMode                                            \
+  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                       \
+         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)      \
+         : OperandMode::kArithmeticCommonMode)))
+#define SubOperandMode                                                   \
+  ((OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm_Negate | \
+    (CpuFeatures::IsSupported(DISTINCT_OPS)                              \
+         ? (OperandMode::kAllowRRR | OperandMode::kAllowRRI)             \
+         : OperandMode::kArithmeticCommonMode)))
+#define MulOperandMode \
+  (OperandMode::kArithmeticCommonMode | OperandMode::kInt32Imm)
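+// The *OperandMode macros are expanded at selection time, so the
+// three-operand (RRR/RRI) forms are only offered when the DISTINCT_OPS
+// facility is available on the CPU.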
+
 // Adds S390-specific methods for generating operands.
 class S390OperandGenerator final : public OperandGenerator {
  public:
   explicit S390OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
-  InstructionOperand UseOperand(Node* node, ImmediateMode mode) {
+  InstructionOperand UseOperand(Node* node, OperandModes mode) {
     if (CanBeImmediate(node, mode)) {
       return UseImmediate(node);
     }
     return UseRegister(node);
   }
 
+  InstructionOperand UseAnyExceptImmediate(Node* node) {
+    if (NodeProperties::IsConstant(node))
+      return UseRegister(node);
+    else
+      return Use(node);
+  }
+
   int64_t GetImmediate(Node* node) {
     if (node->opcode() == IrOpcode::kInt32Constant)
       return OpParameter<int32_t>(node);
@@ -45,7 +101,7 @@
     return 0L;
   }
 
-  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+  bool CanBeImmediate(Node* node, OperandModes mode) {
     int64_t value;
     if (node->opcode() == IrOpcode::kInt32Constant)
       value = OpParameter<int32_t>(node);
@@ -56,22 +112,47 @@
     return CanBeImmediate(value, mode);
   }
 
-  bool CanBeImmediate(int64_t value, ImmediateMode mode) {
-    switch (mode) {
-      case kShift32Imm:
-        return 0 <= value && value < 32;
-      case kShift64Imm:
-        return 0 <= value && value < 64;
-      case kInt32Imm:
-        return is_int32(value);
-      case kInt32Imm_Negate:
-        return is_int32(-value);
-      case kUint32Imm:
-        return is_uint32(value);
-      case kInt20Imm:
-        return is_int20(value);
-      case kNoImmediate:
-        return false;
+  bool CanBeImmediate(int64_t value, OperandModes mode) {
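+    // Immediate-mode bits are tested in priority order below: when several
+    // are set, only the first matching range determines acceptance.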
+    if (mode & OperandMode::kShift32Imm)
+      return 0 <= value && value < 32;
+    else if (mode & OperandMode::kShift64Imm)
+      return 0 <= value && value < 64;
+    else if (mode & OperandMode::kInt32Imm)
+      return is_int32(value);
+    else if (mode & OperandMode::kInt32Imm_Negate)
+      return is_int32(-value);
+    else if (mode & OperandMode::kUint32Imm)
+      return is_uint32(value);
+    else if (mode & OperandMode::kInt20Imm)
+      return is_int20(value);
+    else if (mode & OperandMode::kUint12Imm)
+      return is_uint12(value);
+    else
+      return false;
+  }
+
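+  // Returns true if 'input' is a load that can be folded into 'user' as a
+  // memory operand: the load must be covered by the user, no side effects
+  // may intervene (matching effect levels), and the loaded representation
+  // must match the width of the comparison.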
+  bool CanBeMemoryOperand(InstructionCode opcode, Node* user, Node* input,
+                          int effect_level) {
+    if (input->opcode() != IrOpcode::kLoad ||
+        !selector()->CanCover(user, input)) {
+      return false;
+    }
+
+    if (effect_level != selector()->GetEffectLevel(input)) {
+      return false;
+    }
+
+    MachineRepresentation rep =
+        LoadRepresentationOf(input->op()).representation();
+    switch (opcode) {
+      case kS390_Cmp64:
+      case kS390_LoadAndTestWord64:
+        return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
+      case kS390_LoadAndTestWord32:
+      case kS390_Cmp32:
+        return rep == MachineRepresentation::kWord32;
+      default:
+        break;
     }
     return false;
   }
@@ -119,9 +200,9 @@
     return mode;
   }
 
-  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
-                                                  InstructionOperand inputs[],
-                                                  size_t* input_count) {
+  AddressingMode GetEffectiveAddressMemoryOperand(
+      Node* operand, InstructionOperand inputs[], size_t* input_count,
+      OperandModes immediate_mode = OperandMode::kInt20Imm) {
 #if V8_TARGET_ARCH_S390X
     BaseWithIndexAndDisplacement64Matcher m(operand,
                                             AddressOption::kAllowInputSwap);
@@ -131,7 +212,7 @@
 #endif
     DCHECK(m.matches());
     if ((m.displacement() == nullptr ||
-         CanBeImmediate(m.displacement(), kInt20Imm))) {
+         CanBeImmediate(m.displacement(), immediate_mode))) {
       DCHECK(m.scale() == 0);
       return GenerateMemoryOperandInputs(m.index(), m.base(), m.displacement(),
                                          m.displacement_mode(), inputs,
@@ -158,6 +239,153 @@
 
 namespace {
 
+bool S390OpcodeOnlySupport12BitDisp(ArchOpcode opcode) {
+  switch (opcode) {
+    case kS390_CmpFloat:
+    case kS390_CmpDouble:
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
+  ArchOpcode opcode = ArchOpcodeField::decode(op);
+  return S390OpcodeOnlySupport12BitDisp(opcode);
+}
+
+#define OpcodeImmMode(op)                                       \
+  (S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
+                                      : OperandMode::kInt20Imm)
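+// Float compares are assembled with ceb/cdb, which only accept a 12-bit
+// unsigned displacement; most other memory forms can use the 20-bit signed
+// long-displacement encodings.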
+
+ArchOpcode SelectLoadOpcode(Node* node) {
+  NodeMatcher m(node);
+  DCHECK(m.IsLoad());
+  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+  ArchOpcode opcode = kArchNop;
+  switch (load_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      opcode = kS390_LoadFloat32;
+      break;
+    case MachineRepresentation::kFloat64:
+      opcode = kS390_LoadDouble;
+      break;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
+      break;
+    case MachineRepresentation::kWord16:
+      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
+      break;
+#if !V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+#endif
+    case MachineRepresentation::kWord32:
+      opcode = kS390_LoadWordU32;
+      break;
+#if V8_TARGET_ARCH_S390X
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:
+      opcode = kS390_LoadWord64;
+      break;
+#else
+    case MachineRepresentation::kWord64:  // Fall through.
+#endif
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
+    case MachineRepresentation::kNone:
+    default:
+      UNREACHABLE();
+  }
+  return opcode;
+}
+
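+// Ops whose emitted s390 sequences already clear the upper 32 bits of the
+// result register, so the selector never needs to ask for an explicit
+// zero-extension on their behalf.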
+bool AutoZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+  return true;
+#else
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kUint32MulHigh:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kWord32Clz:
+    case IrOpcode::kWord32Popcnt:
+      return true;
+    default:
+      return false;
+  }
+  return false;
+#endif
+}
+
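+// Returns true if 'node' produces a value whose upper 32 bits are known to
+// be zero when held in a 64-bit register.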
+bool ZeroExtendsWord32ToWord64(Node* node) {
+#if !V8_TARGET_ARCH_S390X
+  return true;
+#else
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kWord32Popcnt:
+      return true;
+    // TODO(john.yan): consider treating the following cases as valid
+    // case IrOpcode::kWord32Equal:
+    // case IrOpcode::kInt32LessThan:
+    // case IrOpcode::kInt32LessThanOrEqual:
+    // case IrOpcode::kUint32LessThan:
+    // case IrOpcode::kUint32LessThanOrEqual:
+    // case IrOpcode::kUint32MulHigh:
+    //   // These 32-bit operations implicitly zero-extend to 64-bit on x64,
+    //   // so the zero-extension is a no-op.
+    //   return true;
+    // case IrOpcode::kProjection: {
+    //   Node* const value = node->InputAt(0);
+    //   switch (value->opcode()) {
+    //     case IrOpcode::kInt32AddWithOverflow:
+    //     case IrOpcode::kInt32SubWithOverflow:
+    //     case IrOpcode::kInt32MulWithOverflow:
+    //       return true;
+    //     default:
+    //       return false;
+    //   }
+    // }
+    case IrOpcode::kLoad: {
+      LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+      switch (load_rep.representation()) {
+        case MachineRepresentation::kWord32:
+          return true;
+        default:
+          return false;
+      }
+    }
+    default:
+      return false;
+  }
+#endif
+}
+
 void VisitRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
   S390OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
@@ -171,15 +399,15 @@
                  g.UseRegister(node->InputAt(1)));
 }
 
+#if V8_TARGET_ARCH_S390X
 void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
-              ImmediateMode operand_mode) {
+              OperandModes operand_mode) {
   S390OperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
                  g.UseOperand(node->InputAt(1), operand_mode));
 }
 
-#if V8_TARGET_ARCH_S390X
 void VisitTryTruncateDouble(InstructionSelector* selector, ArchOpcode opcode,
                             Node* node) {
   S390OperandGenerator g(selector);
@@ -200,7 +428,7 @@
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 void VisitBinop(InstructionSelector* selector, Node* node,
-                InstructionCode opcode, ImmediateMode operand_mode,
+                InstructionCode opcode, OperandModes operand_mode,
                 FlagsContinuation* cont) {
   S390OperandGenerator g(selector);
   Matcher m(node);
@@ -260,7 +488,10 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -269,54 +500,152 @@
 // Shared routine for multiple binary operations.
 template <typename Matcher>
 void VisitBinop(InstructionSelector* selector, Node* node, ArchOpcode opcode,
-                ImmediateMode operand_mode) {
+                OperandModes operand_mode) {
   FlagsContinuation cont;
   VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
 
+void VisitBin32op(InstructionSelector* selector, Node* node,
+                  InstructionCode opcode, OperandModes operand_mode,
+                  FlagsContinuation* cont) {
+  S390OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
+  InstructionOperand inputs[8];
+  size_t input_count = 0;
+  InstructionOperand outputs[2];
+  size_t output_count = 0;
+
+  // Fold a covered TruncateInt64ToInt32 on the left input.
+  if (m.left().IsTruncateInt64ToInt32() && selector->CanCover(node, left)) {
+    left = left->InputAt(0);
+  }
+  // Fold a covered TruncateInt64ToInt32 on the right input.
+  if (m.right().IsTruncateInt64ToInt32() && selector->CanCover(node, right)) {
+    right = right->InputAt(0);
+  }
+
+#if V8_TARGET_ARCH_S390X
+  if ((ZeroExtendsWord32ToWord64(right) || g.CanBeBetterLeftOperand(right)) &&
+      node->op()->HasProperty(Operator::kCommutative) &&
+      !g.CanBeImmediate(right, operand_mode)) {
+    std::swap(left, right);
+  }
+#else
+  if (node->op()->HasProperty(Operator::kCommutative) &&
+      !g.CanBeImmediate(right, operand_mode) &&
+      (g.CanBeBetterLeftOperand(right))) {
+    std::swap(left, right);
+  }
+#endif
+
+  // The left operand is always a register.
+  InstructionOperand const left_input = g.UseRegister(left);
+  inputs[input_count++] = left_input;
+
+  // TODO(turbofan): match complex addressing modes.
+  if (left == right) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov rax, [rbp-0x10]
+    //   add rax, [rbp-0x10]
+    //   jo label
+    inputs[input_count++] = left_input;
+    // Can only be RR or RRR
+    operand_mode &= OperandMode::kAllowRRR;
+  } else if ((operand_mode & OperandMode::kAllowImmediate) &&
+             g.CanBeImmediate(right, operand_mode)) {
+    inputs[input_count++] = g.UseImmediate(right);
+    // Can only be RI or RRI
+    operand_mode &= OperandMode::kAllowImmediate;
+  } else if (operand_mode & OperandMode::kAllowMemoryOperand) {
+    NodeMatcher mright(right);
+    if (mright.IsLoad() && selector->CanCover(node, right) &&
+        SelectLoadOpcode(right) == kS390_LoadWordU32) {
+      AddressingMode mode =
+          g.GetEffectiveAddressMemoryOperand(right, inputs, &input_count);
+      opcode |= AddressingModeField::encode(mode);
+      operand_mode &= ~OperandMode::kAllowImmediate;
+      if (operand_mode & OperandMode::kAllowRM)
+        operand_mode &= ~OperandMode::kAllowDistinctOps;
+    } else if (operand_mode & OperandMode::kAllowRM) {
+      DCHECK(!(operand_mode & OperandMode::kAllowRRM));
+      inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+      operand_mode &=
+          ~OperandMode::kAllowImmediate & ~OperandMode::kAllowDistinctOps;
+    } else if (operand_mode & OperandMode::kAllowRRM) {
+      DCHECK(!(operand_mode & OperandMode::kAllowRM));
+      inputs[input_count++] = g.Use(right);
+      // Cannot be an immediate.
+      operand_mode &= ~OperandMode::kAllowImmediate;
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    inputs[input_count++] = g.UseRegister(right);
+    // Can only be RR or RRR
+    operand_mode &= OperandMode::kAllowRRR;
+  }
+
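+  // Each 32-bit op carries a trailing immediate flag that tells the code
+  // generator whether the 32-bit result must be explicitly zero-extended to
+  // 64 bits: it is set when the left input may hold garbage in its upper
+  // bits and the op does not clear them itself.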
+  bool doZeroExt =
+      AutoZeroExtendsWord32ToWord64(node) || !ZeroExtendsWord32ToWord64(left);
+
+  inputs[input_count++] =
+      g.TempImmediate(doZeroExt && (!AutoZeroExtendsWord32ToWord64(node)));
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  if (doZeroExt && (operand_mode & OperandMode::kAllowDistinctOps) &&
+      // If we can deoptimize as a result of the binop, we need to make sure
+      // that the deopt inputs are not overwritten by the binop result. One
+      // way to achieve this is to declare the output register as
+      // same-as-first.
+      !cont->IsDeoptimize()) {
+    outputs[output_count++] = g.DefineAsRegister(node);
+  } else {
+    outputs[output_count++] = g.DefineSameAsFirst(node);
+  }
+
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0u, input_count);
+  DCHECK_NE(0u, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  opcode = cont->Encode(opcode);
+
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
+}
+
+void VisitBin32op(InstructionSelector* selector, Node* node, ArchOpcode opcode,
+                  OperandModes operand_mode) {
+  FlagsContinuation cont;
+  VisitBin32op(selector, node, opcode, operand_mode, &cont);
+}
+
 }  // namespace
 
 void InstructionSelector::VisitLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
   S390OperandGenerator g(this);
-  ArchOpcode opcode = kArchNop;
-  switch (load_rep.representation()) {
-    case MachineRepresentation::kFloat32:
-      opcode = kS390_LoadFloat32;
-      break;
-    case MachineRepresentation::kFloat64:
-      opcode = kS390_LoadDouble;
-      break;
-    case MachineRepresentation::kBit:  // Fall through.
-    case MachineRepresentation::kWord8:
-      opcode = load_rep.IsSigned() ? kS390_LoadWordS8 : kS390_LoadWordU8;
-      break;
-    case MachineRepresentation::kWord16:
-      opcode = load_rep.IsSigned() ? kS390_LoadWordS16 : kS390_LoadWordU16;
-      break;
-#if !V8_TARGET_ARCH_S390X
-    case MachineRepresentation::kTaggedSigned:   // Fall through.
-    case MachineRepresentation::kTaggedPointer:  // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-#endif
-    case MachineRepresentation::kWord32:
-      opcode = kS390_LoadWordU32;
-      break;
-#if V8_TARGET_ARCH_S390X
-    case MachineRepresentation::kTaggedSigned:   // Fall through.
-    case MachineRepresentation::kTaggedPointer:  // Fall through.
-    case MachineRepresentation::kTagged:  // Fall through.
-    case MachineRepresentation::kWord64:
-      opcode = kS390_LoadWord64;
-      break;
-#else
-    case MachineRepresentation::kWord64:    // Fall through.
-#endif
-    case MachineRepresentation::kSimd128:  // Fall through.
-    case MachineRepresentation::kNone:
-      UNREACHABLE();
-      return;
-  }
+  ArchOpcode opcode = SelectLoadOpcode(node);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[3];
@@ -350,7 +679,7 @@
     inputs[input_count++] = g.UseUniqueRegister(base);
     // OutOfLineRecordWrite uses the offset in an 'AddP' instruction as well as
     // for the store itself, so we must check compatibility with both.
-    if (g.CanBeImmediate(offset, kInt20Imm)) {
+    if (g.CanBeImmediate(offset, OperandMode::kInt20Imm)) {
       inputs[input_count++] = g.UseImmediate(offset);
       addressing_mode = kMode_MRI;
     } else {
@@ -423,6 +752,9 @@
       case MachineRepresentation::kWord64:  // Fall through.
 #endif
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -440,6 +772,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk)
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -482,6 +819,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -489,7 +829,7 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode),
        g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kUint32Imm));
+       g.UseOperand(length, OperandMode::kUint32Imm));
 }
 
 void InstructionSelector::VisitCheckedStore(Node* node) {
@@ -529,6 +869,9 @@
     case MachineRepresentation::kWord64:  // Fall through.
 #endif
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -536,9 +879,10 @@
   AddressingMode addressingMode = kMode_MRR;
   Emit(opcode | AddressingModeField::encode(addressingMode), g.NoOutput(),
        g.UseRegister(base), g.UseRegister(offset),
-       g.UseOperand(length, kUint32Imm), g.UseRegister(value));
+       g.UseOperand(length, OperandMode::kUint32Imm), g.UseRegister(value));
 }
 
+#if 0
 static inline bool IsContiguousMask32(uint32_t value, int* mb, int* me) {
   int mask_width = base::bits::CountPopulation32(value);
   int mask_msb = base::bits::CountLeadingZeros32(value);
@@ -549,6 +893,7 @@
   *me = mask_lsb;
   return true;
 }
+#endif
 
 #if V8_TARGET_ARCH_S390X
 static inline bool IsContiguousMask64(uint64_t value, int* mb, int* me) {
@@ -564,37 +909,7 @@
 #endif
 
 void InstructionSelector::VisitWord32And(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  int mb = 0;
-  int me = 0;
-  if (m.right().HasValue() && IsContiguousMask32(m.right().Value(), &mb, &me)) {
-    int sh = 0;
-    Node* left = m.left().node();
-    if ((m.left().IsWord32Shr() || m.left().IsWord32Shl()) &&
-        CanCover(node, left)) {
-      Int32BinopMatcher mleft(m.left().node());
-      if (mleft.right().IsInRange(0, 31)) {
-        left = mleft.left().node();
-        sh = mleft.right().Value();
-        if (m.left().IsWord32Shr()) {
-          // Adjust the mask such that it doesn't include any rotated bits.
-          if (mb > 31 - sh) mb = 31 - sh;
-          sh = (32 - sh) & 0x1f;
-        } else {
-          // Adjust the mask such that it doesn't include any rotated bits.
-          if (me < sh) me = sh;
-        }
-      }
-    }
-    if (mb >= me) {
-      Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-           g.UseRegister(left), g.TempImmediate(sh), g.TempImmediate(mb),
-           g.TempImmediate(me));
-      return;
-    }
-  }
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_And32, kUint32Imm);
+  VisitBin32op(this, node, kS390_And32, AndOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -646,65 +961,36 @@
       }
     }
   }
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64, kUint32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_And64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  Int32BinopMatcher m(node);
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Or32, kUint32Imm);
+  VisitBin32op(this, node, kS390_Or32, OrOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Or(Node* node) {
   Int64BinopMatcher m(node);
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64, kUint32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Or64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Xor(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kS390_Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Xor32, kUint32Imm);
-  }
+  VisitBin32op(this, node, kS390_Xor32, XorOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Xor(Node* node) {
-  S390OperandGenerator g(this);
-  Int64BinopMatcher m(node);
-  if (m.right().Is(-1)) {
-    Emit(kS390_Not64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64, kUint32Imm);
-  }
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Xor64,
+                                OperandMode::kUint32Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
-    Int32BinopMatcher mleft(m.left().node());
-    int sh = m.right().Value();
-    int mb;
-    int me;
-    if (mleft.right().HasValue() &&
-        IsContiguousMask32(mleft.right().Value() << sh, &mb, &me)) {
-      // Adjust the mask such that it doesn't include any rotated bits.
-      if (me < sh) me = sh;
-      if (mb >= me) {
-        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
-             g.TempImmediate(mb), g.TempImmediate(me));
-        return;
-      }
-    }
-  }
-  VisitRRO(this, kS390_ShiftLeft32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftLeft32, ShiftOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -747,32 +1033,12 @@
       }
     }
   }
-  VisitRRO(this, kS390_ShiftLeft64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftLeft64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Shr(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  if (m.left().IsWord32And() && m.right().IsInRange(0, 31)) {
-    Int32BinopMatcher mleft(m.left().node());
-    int sh = m.right().Value();
-    int mb;
-    int me;
-    if (mleft.right().HasValue() &&
-        IsContiguousMask32((uint32_t)(mleft.right().Value()) >> sh, &mb, &me)) {
-      // Adjust the mask such that it doesn't include any rotated bits.
-      if (mb > 31 - sh) mb = 31 - sh;
-      sh = (32 - sh) & 0x1f;
-      if (mb >= me) {
-        Emit(kS390_RotLeftAndMask32, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(sh),
-             g.TempImmediate(mb), g.TempImmediate(me));
-        return;
-      }
-    }
-  }
-  VisitRRO(this, kS390_ShiftRight32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftRight32, ShiftOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -811,7 +1077,7 @@
       }
     }
   }
-  VisitRRO(this, kS390_ShiftRight64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftRight64, node, OperandMode::kShift64Imm);
 }
 #endif
 
@@ -822,16 +1088,20 @@
   if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
     Int32BinopMatcher mleft(m.left().node());
     if (mleft.right().Is(16) && m.right().Is(16)) {
-      Emit(kS390_ExtendSignWord16, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()));
+      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+      Emit(kS390_ExtendSignWord16,
+           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
       return;
     } else if (mleft.right().Is(24) && m.right().Is(24)) {
-      Emit(kS390_ExtendSignWord8, g.DefineAsRegister(node),
-           g.UseRegister(mleft.left().node()));
+      bool doZeroExt = !ZeroExtendsWord32ToWord64(mleft.left().node());
+      Emit(kS390_ExtendSignWord8,
+           doZeroExt ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(doZeroExt));
       return;
     }
   }
-  VisitRRO(this, kS390_ShiftRightArith32, node, kShift32Imm);
+  VisitBin32op(this, node, kS390_ShiftRightArith32, ShiftOperandMode);
 }
 
 #if !V8_TARGET_ARCH_S390X
@@ -857,7 +1127,7 @@
     // instruction.
     selector->Emit(opcode2, g.DefineSameAsFirst(node),
                    g.UseRegister(node->InputAt(0)),
-                   g.UseRegister(node->InputAt(2)));
+                   g.UseRegister(node->InputAt(2)), g.TempImmediate(0));
   }
 }
 
@@ -887,7 +1157,8 @@
     // The high word of the result is not used, so we emit the standard 32 bit
     // instruction.
     Emit(kS390_Mul32, g.DefineSameAsFirst(node),
-         g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(2)));
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(2)),
+         g.TempImmediate(0));
   }
 }
 
@@ -943,24 +1214,25 @@
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Sar(Node* node) {
-  VisitRRO(this, kS390_ShiftRightArith64, node, kShift64Imm);
+  VisitRRO(this, kS390_ShiftRightArith64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Ror(Node* node) {
-  VisitRRO(this, kS390_RotRight32, node, kShift32Imm);
+  // TODO(john): match dst = ror(src1, src2 + imm)
+  VisitBin32op(this, node, kS390_RotRight32,
+               OperandMode::kAllowRI | OperandMode::kAllowRRR |
+                   OperandMode::kAllowRRI | OperandMode::kShift32Imm);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitWord64Ror(Node* node) {
-  VisitRRO(this, kS390_RotRight64, node, kShift64Imm);
+  VisitRRO(this, kS390_RotRight64, node, OperandMode::kShift64Imm);
 }
 #endif
 
 void InstructionSelector::VisitWord32Clz(Node* node) {
-  S390OperandGenerator g(this);
-  Emit(kS390_Cntlz32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  VisitRR(this, kS390_Cntlz32, node);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -973,8 +1245,8 @@
 
 void InstructionSelector::VisitWord32Popcnt(Node* node) {
   S390OperandGenerator g(this);
-  Emit(kS390_Popcnt32, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  Emit(kS390_Popcnt32, g.DefineAsRegister(node), g.UseRegister(value));
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1023,12 +1295,13 @@
 }
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm);
+  VisitBin32op(this, node, kS390_Add32, AddOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+                                OperandMode::kInt32Imm);
 }
 #endif
 
@@ -1036,10 +1309,12 @@
   S390OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
-    Emit(kS390_Neg32, g.DefineAsRegister(node),
-         g.UseRegister(m.right().node()));
+    Node* right = m.right().node();
+    bool doZeroExt = ZeroExtendsWord32ToWord64(right);
+    Emit(kS390_Neg32, g.DefineAsRegister(node), g.UseRegister(right),
+         g.TempImmediate(doZeroExt));
   } else {
-    VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate);
+    VisitBin32op(this, node, kS390_Sub32, SubOperandMode);
   }
 }
 
@@ -1051,7 +1326,8 @@
     Emit(kS390_Neg64, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
-    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate);
+    VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+                                  OperandMode::kInt32Imm_Negate);
   }
 }
 #endif
@@ -1061,35 +1337,14 @@
 void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                   InstructionOperand left, InstructionOperand right,
                   FlagsContinuation* cont);
-void EmitInt32MulWithOverflow(InstructionSelector* selector, Node* node,
-                              FlagsContinuation* cont) {
-  S390OperandGenerator g(selector);
-  Int32BinopMatcher m(node);
-  InstructionOperand result_operand = g.DefineAsRegister(node);
-  InstructionOperand high32_operand = g.TempRegister();
-  InstructionOperand temp_operand = g.TempRegister();
-  {
-    InstructionOperand outputs[] = {result_operand, high32_operand};
-    InstructionOperand inputs[] = {g.UseRegister(m.left().node()),
-                                   g.UseRegister(m.right().node())};
-    selector->Emit(kS390_Mul32WithHigh32, 2, outputs, 2, inputs);
-  }
-  {
-    InstructionOperand shift_31 = g.UseImmediate(31);
-    InstructionOperand outputs[] = {temp_operand};
-    InstructionOperand inputs[] = {result_operand, shift_31};
-    selector->Emit(kS390_ShiftRightArith32, 1, outputs, 2, inputs);
-  }
 
-  VisitCompare(selector, kS390_Cmp32, high32_operand, temp_operand, cont);
-}
-
+#if V8_TARGET_ARCH_S390X
 void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   S390OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm)) {
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
@@ -1100,17 +1355,18 @@
                    g.Use(right));
   }
 }
+#endif
 
 }  // namespace
 
 void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kNotEqual, ovf);
-    return EmitInt32MulWithOverflow(this, node, &cont);
+    return VisitBin32op(this, node, kS390_Mul32WithOverflow,
+                        OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+                        &cont);
   }
-  VisitMul(this, node, kS390_Mul32);
-  // FlagsContinuation cont;
-  // EmitInt32MulWithOverflow(this, node, &cont);
+  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
 }
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
@@ -1118,14 +1374,20 @@
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm) &&
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
       base::bits::IsPowerOfTwo32(g.GetImmediate(right))) {
     int power = 31 - base::bits::CountLeadingZeros32(g.GetImmediate(right));
-    Emit(kS390_ShiftLeft32, g.DefineSameAsFirst(node), g.UseRegister(left),
-         g.UseImmediate(power));
+    bool doZeroExt = !ZeroExtendsWord32ToWord64(left);
+    InstructionOperand dst =
+        (doZeroExt && CpuFeatures::IsSupported(DISTINCT_OPS))
+            ? g.DefineAsRegister(node)
+            : g.DefineSameAsFirst(node);
+
+    Emit(kS390_ShiftLeft32, dst, g.UseRegister(left), g.UseImmediate(power),
+         g.TempImmediate(doZeroExt));
     return;
   }
-  VisitMul(this, node, kS390_Mul32);
+  VisitBin32op(this, node, kS390_Mul32, MulOperandMode);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1134,7 +1396,7 @@
   Int64BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
-  if (g.CanBeImmediate(right, kInt32Imm) &&
+  if (g.CanBeImmediate(right, OperandMode::kInt32Imm) &&
       base::bits::IsPowerOfTwo64(g.GetImmediate(right))) {
     int power = 63 - base::bits::CountLeadingZeros64(g.GetImmediate(right));
     Emit(kS390_ShiftLeft64, g.DefineSameAsFirst(node), g.UseRegister(left),
@@ -1146,31 +1408,18 @@
 #endif
 
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  Node* left = m.left().node();
-  Node* right = m.right().node();
-  if (g.CanBeBetterLeftOperand(right)) {
-    std::swap(left, right);
-  }
-  Emit(kS390_MulHigh32, g.DefineAsRegister(node), g.UseRegister(left),
-       g.Use(right));
+  VisitBin32op(this, node, kS390_MulHigh32,
+               OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps);
 }
 
 void InstructionSelector::VisitUint32MulHigh(Node* node) {
-  S390OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  Node* left = m.left().node();
-  Node* right = m.right().node();
-  if (g.CanBeBetterLeftOperand(right)) {
-    std::swap(left, right);
-  }
-  Emit(kS390_MulHighU32, g.DefineAsRegister(node), g.UseRegister(left),
-       g.Use(right));
+  VisitBin32op(this, node, kS390_MulHighU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 void InstructionSelector::VisitInt32Div(Node* node) {
-  VisitRRR(this, kS390_Div32, node);
+  VisitBin32op(this, node, kS390_Div32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1180,7 +1429,8 @@
 #endif
 
 void InstructionSelector::VisitUint32Div(Node* node) {
-  VisitRRR(this, kS390_DivU32, node);
+  VisitBin32op(this, node, kS390_DivU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1190,7 +1440,8 @@
 #endif
 
 void InstructionSelector::VisitInt32Mod(Node* node) {
-  VisitRRR(this, kS390_Mod32, node);
+  VisitBin32op(this, node, kS390_Mod32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1200,7 +1451,8 @@
 #endif
 
 void InstructionSelector::VisitUint32Mod(Node* node) {
-  VisitRRR(this, kS390_ModU32, node);
+  VisitBin32op(this, node, kS390_ModU32,
+               OperandMode::kAllowRRM | OperandMode::kAllowRRR);
 }
 
 #if V8_TARGET_ARCH_S390X
@@ -1264,7 +1516,13 @@
 }
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
-  // TODO(mbrandy): inspect input to see if nop is appropriate.
+  S390OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (ZeroExtendsWord32ToWord64(value)) {
+    // These 32-bit operations implicitly zero-extend to 64-bit, so the
+    // zero-extension is a no-op.
+    return EmitIdentity(node);
+  }
   VisitRR(this, kS390_Uint32ToUint64, node);
 }
 #endif
@@ -1470,46 +1728,46 @@
 }
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  OperandModes mode = AddOperandMode;
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm,
-                                         &cont);
+    return VisitBin32op(this, node, kS390_Add32, mode, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Add32, kInt32Imm, &cont);
+  VisitBin32op(this, node, kS390_Add32, mode, &cont);
 }
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  OperandModes mode = SubOperandMode;
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32,
-                                         kInt32Imm_Negate, &cont);
+    return VisitBin32op(this, node, kS390_Sub32, mode, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int32BinopMatcher>(this, node, kS390_Sub32, kInt32Imm_Negate,
-                                &cont);
+  VisitBin32op(this, node, kS390_Sub32, mode, &cont);
 }
 
 #if V8_TARGET_ARCH_S390X
 void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
-    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm,
-                                         &cont);
+    return VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64,
+                                         OperandMode::kInt32Imm, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, kInt32Imm, &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Add64, OperandMode::kInt32Imm,
+                                &cont);
 }
 
 void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
   if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
     FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
     return VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
-                                         kInt32Imm_Negate, &cont);
+                                         OperandMode::kInt32Imm_Negate, &cont);
   }
   FlagsContinuation cont;
-  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64, kInt32Imm_Negate,
-                                &cont);
+  VisitBinop<Int64BinopMatcher>(this, node, kS390_Sub64,
+                                OperandMode::kInt32Imm_Negate, &cont);
 }
 #endif
 
@@ -1539,68 +1797,204 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, InstructionCode opcode,
+                          FlagsContinuation* cont);
+
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+                      Node* node, Node* value, FlagsContinuation* cont,
+                      bool discard_output = false);
+
 // Shared routine for multiple word compare operations.
 void VisitWordCompare(InstructionSelector* selector, Node* node,
                       InstructionCode opcode, FlagsContinuation* cont,
-                      bool commutative, ImmediateMode immediate_mode) {
+                      OperandModes immediate_mode) {
   S390OperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
 
-  // Match immediates on left or right side of comparison.
-  if (g.CanBeImmediate(right, immediate_mode)) {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
-                 cont);
-  } else if (g.CanBeImmediate(left, immediate_mode)) {
-    if (!commutative) cont->Commute();
-    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
-                 cont);
+  DCHECK(IrOpcode::IsComparisonOpcode(node->opcode()) ||
+         node->opcode() == IrOpcode::kInt32Sub ||
+         node->opcode() == IrOpcode::kInt64Sub);
+
+  InstructionOperand inputs[8];
+  InstructionOperand outputs[1];
+  size_t input_count = 0;
+  size_t output_count = 0;
+
+  // If one of the two inputs is an immediate, make sure it's on the right, or
+  // if one of the two inputs is a memory operand, make sure it's on the left.
+  int effect_level = selector->GetEffectLevel(node);
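+  // When the compare feeds a branch it is emitted at the branch's position,
+  // so take the effect level there when deciding whether a load can be
+  // folded.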
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
+  if ((!g.CanBeImmediate(right, immediate_mode) &&
+       g.CanBeImmediate(left, immediate_mode)) ||
+      (!g.CanBeMemoryOperand(opcode, node, right, effect_level) &&
+       g.CanBeMemoryOperand(opcode, node, left, effect_level))) {
+    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
+    std::swap(left, right);
+  }
+
+  // If comparing against the immediate zero, fold the compare into a
+  // load-and-test, which sets the condition code directly.
+  if (g.CanBeImmediate(right, immediate_mode) && g.GetImmediate(right) == 0) {
+    DCHECK(opcode == kS390_Cmp32 || opcode == kS390_Cmp64);
+    ArchOpcode load_and_test = (opcode == kS390_Cmp32)
+                                   ? kS390_LoadAndTestWord32
+                                   : kS390_LoadAndTestWord64;
+    return VisitLoadAndTest(selector, load_and_test, node, left, cont, true);
+  }
+
+  inputs[input_count++] = g.UseRegister(left);
+  if (g.CanBeMemoryOperand(opcode, node, right, effect_level)) {
+    // Fold the covered load into a memory operand.
+    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
+        right, inputs, &input_count, OpcodeImmMode(opcode));
+    opcode |= AddressingModeField::encode(addressing_mode);
+  } else if (g.CanBeImmediate(right, immediate_mode)) {
+    inputs[input_count++] = g.UseImmediate(right);
   } else {
-    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
-                 cont);
+    inputs[input_count++] = g.UseAnyExceptImmediate(right);
+  }
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    // nothing to do
+  }
+
+  DCHECK(input_count <= 8 && output_count <= 1);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
 }
 
 void VisitWord32Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kInt32Imm);
-  VisitWordCompare(selector, node, kS390_Cmp32, cont, false, mode);
+  OperandModes mode =
+      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+  VisitWordCompare(selector, node, kS390_Cmp32, cont, mode);
 }
 
 #if V8_TARGET_ARCH_S390X
 void VisitWord64Compare(InstructionSelector* selector, Node* node,
                         FlagsContinuation* cont) {
-  ImmediateMode mode = (CompareLogical(cont) ? kUint32Imm : kUint32Imm);
-  VisitWordCompare(selector, node, kS390_Cmp64, cont, false, mode);
+  OperandModes mode =
+      (CompareLogical(cont) ? OperandMode::kUint32Imm : OperandMode::kInt32Imm);
+  VisitWordCompare(selector, node, kS390_Cmp64, cont, mode);
 }
 #endif
 
 // Shared routine for multiple float32 compare operations.
 void VisitFloat32Compare(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
-  S390OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  VisitCompare(selector, kS390_CmpFloat, g.UseRegister(left),
-               g.UseRegister(right), cont);
+  VisitWordCompare(selector, node, kS390_CmpFloat, cont, OperandMode::kNone);
 }
 
 // Shared routine for multiple float64 compare operations.
 void VisitFloat64Compare(InstructionSelector* selector, Node* node,
                          FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kS390_CmpDouble, cont, OperandMode::kNone);
+}
+
+void VisitTestUnderMask(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  DCHECK(node->opcode() == IrOpcode::kWord32And ||
+         node->opcode() == IrOpcode::kWord64And);
+  ArchOpcode opcode =
+      (node->opcode() == IrOpcode::kWord32And) ? kS390_Tst32 : kS390_Tst64;
   S390OperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);
-  VisitCompare(selector, kS390_CmpDouble, g.UseRegister(left),
-               g.UseRegister(right), cont);
+  if (!g.CanBeImmediate(right, OperandMode::kUint32Imm) &&
+      g.CanBeImmediate(left, OperandMode::kUint32Imm)) {
+    std::swap(left, right);
+  }
+  VisitCompare(selector, opcode, g.UseRegister(left),
+               g.UseOperand(right, OperandMode::kUint32Imm), cont);
+}
+
+void VisitLoadAndTest(InstructionSelector* selector, InstructionCode opcode,
+                      Node* node, Node* value, FlagsContinuation* cont,
+                      bool discard_output) {
+  static_assert(kS390_LoadAndTestFloat64 - kS390_LoadAndTestWord32 == 3,
+                "The LoadAndTest opcodes must be consecutive.");
+
+  // TODO(john.yan): Add support for Float32/Float64.
+  DCHECK(opcode >= kS390_LoadAndTestWord32 &&
+         opcode <= kS390_LoadAndTestWord64);
+
+  S390OperandGenerator g(selector);
+  InstructionOperand inputs[8];
+  InstructionOperand outputs[2];
+  size_t input_count = 0;
+  size_t output_count = 0;
+  bool use_value = false;
+
+  int effect_level = selector->GetEffectLevel(node);
+  if (cont->IsBranch()) {
+    effect_level = selector->GetEffectLevel(
+        cont->true_block()->PredecessorAt(0)->control_input());
+  }
+
+  if (g.CanBeMemoryOperand(opcode, node, value, effect_level)) {
+    // Generate a memory operand.
+    AddressingMode addressing_mode =
+        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+    opcode |= AddressingModeField::encode(addressing_mode);
+  } else {
+    inputs[input_count++] = g.UseAnyExceptImmediate(value);
+    use_value = true;
+  }
+
+  if (!discard_output && !use_value) {
+    outputs[output_count++] = g.DefineAsRegister(value);
+  }
+
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  } else if (cont->IsTrap()) {
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+  } else {
+    DCHECK(cont->IsDeoptimize());
+    // nothing to do
+  }
+
+  DCHECK(input_count <= 8 && output_count <= 2);
+  if (cont->IsDeoptimize()) {
+    selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else {
+    selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  }
 }
 
 // Shared routine for word comparisons against zero.
@@ -1618,11 +2012,29 @@
     cont->Negate();
   }
 
+  FlagsCondition fc = cont->condition();
   if (selector->CanCover(user, value)) {
     switch (value->opcode()) {
-      case IrOpcode::kWord32Equal:
+      case IrOpcode::kWord32Equal: {
         cont->OverwriteAndNegateIfEqual(kEqual);
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          // Try to combine the branch with a comparison.
+          Node* const user = m.node();
+          Node* const value = m.left().node();
+          if (selector->CanCover(user, value)) {
+            switch (value->opcode()) {
+              case IrOpcode::kInt32Sub:
+                return VisitWord32Compare(selector, value, cont);
+              case IrOpcode::kWord32And:
+                return VisitTestUnderMask(selector, value, cont);
+              default:
+                break;
+            }
+          }
+        }
         return VisitWord32Compare(selector, value, cont);
+      }
       case IrOpcode::kInt32LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord32Compare(selector, value, cont);
@@ -1636,9 +2048,26 @@
         cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
         return VisitWord32Compare(selector, value, cont);
 #if V8_TARGET_ARCH_S390X
-      case IrOpcode::kWord64Equal:
+      case IrOpcode::kWord64Equal: {
         cont->OverwriteAndNegateIfEqual(kEqual);
+        Int64BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          // Try to combine the branch with a comparison.
+          Node* const user = m.node();
+          Node* const value = m.left().node();
+          if (selector->CanCover(user, value)) {
+            switch (value->opcode()) {
+              case IrOpcode::kInt64Sub:
+                return VisitWord64Compare(selector, value, cont);
+              case IrOpcode::kWord64And:
+                return VisitTestUnderMask(selector, value, cont);
+              default:
+                break;
+            }
+          }
+        }
         return VisitWord64Compare(selector, value, cont);
+      }
       case IrOpcode::kInt64LessThan:
         cont->OverwriteAndNegateIfEqual(kSignedLessThan);
         return VisitWord64Compare(selector, value, cont);
@@ -1685,24 +2114,28 @@
             switch (node->opcode()) {
               case IrOpcode::kInt32AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(
-                    selector, node, kS390_Add32, kInt32Imm, cont);
+                return VisitBin32op(selector, node, kS390_Add32, AddOperandMode,
+                                    cont);
               case IrOpcode::kInt32SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
-                return VisitBinop<Int32BinopMatcher>(
-                    selector, node, kS390_Sub32, kInt32Imm_Negate, cont);
+                return VisitBin32op(selector, node, kS390_Sub32, SubOperandMode,
+                                    cont);
               case IrOpcode::kInt32MulWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kNotEqual);
-                return EmitInt32MulWithOverflow(selector, node, cont);
+                return VisitBin32op(
+                    selector, node, kS390_Mul32WithOverflow,
+                    OperandMode::kInt32Imm | OperandMode::kAllowDistinctOps,
+                    cont);
 #if V8_TARGET_ARCH_S390X
               case IrOpcode::kInt64AddWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int64BinopMatcher>(
-                    selector, node, kS390_Add64, kInt32Imm, cont);
+                    selector, node, kS390_Add64, OperandMode::kInt32Imm, cont);
               case IrOpcode::kInt64SubWithOverflow:
                 cont->OverwriteAndNegateIfEqual(kOverflow);
                 return VisitBinop<Int64BinopMatcher>(
-                    selector, node, kS390_Sub64, kInt32Imm_Negate, cont);
+                    selector, node, kS390_Sub64, OperandMode::kInt32Imm_Negate,
+                    cont);
 #endif
               default:
                 break;
@@ -1711,53 +2144,77 @@
         }
         break;
       case IrOpcode::kInt32Sub:
-        return VisitWord32Compare(selector, value, cont);
+        if (fc == kNotEqual || fc == kEqual)
+          return VisitWord32Compare(selector, value, cont);
+        break;
       case IrOpcode::kWord32And:
-        return VisitWordCompare(selector, value, kS390_Tst32, cont, true,
-                                kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt32Add:
-// case IrOpcode::kWord32Or:
-// case IrOpcode::kWord32Xor:
-// case IrOpcode::kWord32Sar:
-// case IrOpcode::kWord32Shl:
-// case IrOpcode::kWord32Shr:
-// case IrOpcode::kWord32Ror:
+        return VisitTestUnderMask(selector, value, cont);
+      case IrOpcode::kLoad: {
+        LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+        switch (load_rep.representation()) {
+          case MachineRepresentation::kWord32:
+            if (opcode == kS390_LoadAndTestWord32) {
+              return VisitLoadAndTest(selector, opcode, user, value, cont);
+            }
+          default:
+            break;
+        }
+        break;
+      }
+      case IrOpcode::kInt32Add:
+        // can't handle overflow case.
+        break;
+      case IrOpcode::kWord32Or:
+        return VisitBin32op(selector, value, kS390_Or32, OrOperandMode, cont);
+      case IrOpcode::kWord32Xor:
+        return VisitBin32op(selector, value, kS390_Xor32, XorOperandMode, cont);
+      case IrOpcode::kWord32Sar:
+      case IrOpcode::kWord32Shl:
+      case IrOpcode::kWord32Shr:
+      case IrOpcode::kWord32Ror:
+        // doesn't generate cc, so ignore.
+        break;
 #if V8_TARGET_ARCH_S390X
       case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(selector, value, cont);
+        if (fc == kNotEqual || fc == kEqual)
+          return VisitWord64Compare(selector, value, cont);
+        break;
       case IrOpcode::kWord64And:
-        return VisitWordCompare(selector, value, kS390_Tst64, cont, true,
-                                kUint32Imm);
-// TODO(mbrandy): Handle?
-// case IrOpcode::kInt64Add:
-// case IrOpcode::kWord64Or:
-// case IrOpcode::kWord64Xor:
-// case IrOpcode::kWord64Sar:
-// case IrOpcode::kWord64Shl:
-// case IrOpcode::kWord64Shr:
-// case IrOpcode::kWord64Ror:
+        return VisitTestUnderMask(selector, value, cont);
+      case IrOpcode::kInt64Add:
+        // can't handle overflow case.
+        break;
+      case IrOpcode::kWord64Or:
+        // TODO(john.yan): need to handle
+        break;
+      case IrOpcode::kWord64Xor:
+        // TODO(john.yan): need to handle
+        break;
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shl:
+      case IrOpcode::kWord64Shr:
+      case IrOpcode::kWord64Ror:
+        // doesn't generate cc, so ignore.
+        break;
 #endif
       default:
         break;
     }
   }
 
-  // Branch could not be combined with a compare, emit compare against 0.
-  S390OperandGenerator g(selector);
-  VisitCompare(selector, opcode, g.UseRegister(value), g.TempImmediate(0),
-               cont);
+  // Branch could not be combined with a compare, emit LoadAndTest
+  VisitLoadAndTest(selector, opcode, user, value, cont, true);
 }
 
 void VisitWord32CompareZero(InstructionSelector* selector, Node* user,
                             Node* value, FlagsContinuation* cont) {
-  VisitWordCompareZero(selector, user, value, kS390_Cmp32, cont);
+  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord32, cont);
 }
 
 #if V8_TARGET_ARCH_S390X
 void VisitWord64CompareZero(InstructionSelector* selector, Node* user,
                             Node* value, FlagsContinuation* cont) {
-  VisitWordCompareZero(selector, user, value, kS390_Cmp64, cont);
+  VisitWordCompareZero(selector, user, value, kS390_LoadAndTestWord64, cont);
 }
 #endif
 
@@ -1770,14 +2227,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWord32CompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -1797,9 +2269,14 @@
     InstructionOperand index_operand = value_operand;
     if (sw.min_value) {
       index_operand = g.TempRegister();
-      Emit(kS390_Sub32, index_operand, value_operand,
-           g.TempImmediate(sw.min_value));
+      Emit(kS390_Lay | AddressingModeField::encode(kMode_MRI), index_operand,
+           value_operand, g.TempImmediate(-sw.min_value));
     }
+#if V8_TARGET_ARCH_S390X
+    InstructionOperand index_operand_zero_ext = g.TempRegister();
+    Emit(kS390_Uint32ToUint64, index_operand_zero_ext, index_operand);
+    index_operand = index_operand_zero_ext;
+#endif
     // Generate a table lookup.
     return EmitTableSwitch(sw, index_operand);
   }
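
The net effect of the VisitWordCompare rewrite above is an operand
canonicalization pass before emission. A minimal standalone sketch of the
rule, with invented Input/Condition types (illustration only, not the
selector's API):

    #include <cstdio>
    #include <utility>

    enum Condition { kEqual, kSignedLessThan, kSignedGreaterThan };

    // Order-sensitive conditions flip when the operands swap.
    Condition Commute(Condition cond) {
      switch (cond) {
        case kSignedLessThan:
          return kSignedGreaterThan;
        case kSignedGreaterThan:
          return kSignedLessThan;
        default:
          return cond;  // kEqual is symmetric.
      }
    }

    struct Input {
      const char* name;
      bool can_be_immediate;
      bool can_be_memory;
    };

    // Same shape as the swap in VisitWordCompare: an immediate- or
    // memory-capable input migrates to the right, and a non-commutative
    // compare commutes its condition to compensate.
    void Canonicalize(Input& left, Input& right, bool commutative,
                      Condition& cond) {
      if ((!right.can_be_immediate && left.can_be_immediate) ||
          (!right.can_be_memory && left.can_be_memory)) {
        if (!commutative) cond = Commute(cond);
        std::swap(left, right);
      }
    }

    int main() {
      Input left = {"#42", true, false};  // constant on the wrong side
      Input right = {"r1", false, false};
      Condition cond = kSignedLessThan;
      Canonicalize(left, right, /*commutative=*/false, cond);
      std::printf("%s cond=%d %s\n", left.name, cond, right.name);
      // "#42 < r1" has been rewritten as "r1 > #42".
      return 0;
    }
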
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
index eb3dda8..dcc84b3 100644
--- a/src/compiler/schedule.cc
+++ b/src/compiler/schedule.cc
@@ -407,7 +407,7 @@
       if (!block->deferred()) {
         bool deferred = block->PredecessorCount() > 0;
         for (auto pred : block->predecessors()) {
-          if (!pred->deferred()) {
+          if (!pred->deferred() && (pred->rpo_number() < block->rpo_number())) {
             deferred = false;
           }
         }
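
The schedule.cc change only lets predecessors that come earlier in RPO
clear a block's deferred mark, so a loop back edge can no longer un-defer
its own header. A toy model of the propagation rule, using an invented
Block struct rather than the real Schedule types:

    #include <cstdio>
    #include <vector>

    struct Block {
      int rpo_number;
      bool deferred;
      std::vector<int> predecessors;  // indices into the block list
    };

    // A not-yet-deferred block becomes deferred unless some non-deferred
    // predecessor *earlier in RPO* reaches it; back edges are ignored.
    void PropagateDeferred(std::vector<Block>& blocks) {
      for (Block& block : blocks) {
        if (block.deferred) continue;
        bool deferred = !block.predecessors.empty();
        for (int p : block.predecessors) {
          if (!blocks[p].deferred &&
              blocks[p].rpo_number < block.rpo_number) {
            deferred = false;
          }
        }
        block.deferred = deferred;
      }
    }

    int main() {
      std::vector<Block> blocks = {
          {0, true, {}},       // deferred entry
          {1, false, {0, 2}},  // loop header: forward pred 0, back edge 2
          {2, false, {1}},     // loop body, later in RPO
      };
      PropagateDeferred(blocks);
      // Without the rpo_number test, the back edge from block 2 would
      // clear the header's deferred bit; with it, the header correctly
      // inherits deferred-ness from its only forward predecessor.
      std::printf("header deferred: %d\n", blocks[1].deferred);  // 1
      return 0;
    }
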
diff --git a/src/compiler/simd-scalar-lowering.cc b/src/compiler/simd-scalar-lowering.cc
index c5a94b4..19ffe93 100644
--- a/src/compiler/simd-scalar-lowering.cc
+++ b/src/compiler/simd-scalar-lowering.cc
@@ -9,6 +9,7 @@
 #include "src/compiler/node-properties.h"
 
 #include "src/compiler/node.h"
+#include "src/objects-inl.h"
 #include "src/wasm/wasm-module.h"
 
 namespace v8 {
@@ -58,6 +59,9 @@
           // that they are processed after all other nodes.
           PreparePhiReplacement(input);
           stack_.push_front({input, 0});
+        } else if (input->opcode() == IrOpcode::kEffectPhi ||
+                   input->opcode() == IrOpcode::kLoop) {
+          stack_.push_front({input, 0});
         } else {
           stack_.push_back({input, 0});
         }
@@ -70,12 +74,14 @@
 #define FOREACH_INT32X4_OPCODE(V) \
   V(Int32x4Add)                   \
   V(Int32x4ExtractLane)           \
-  V(CreateInt32x4)
+  V(CreateInt32x4)                \
+  V(Int32x4ReplaceLane)
 
 #define FOREACH_FLOAT32X4_OPCODE(V) \
   V(Float32x4Add)                   \
   V(Float32x4ExtractLane)           \
-  V(CreateFloat32x4)
+  V(CreateFloat32x4)                \
+  V(Float32x4ReplaceLane)
 
 void SimdScalarLowering::SetLoweredType(Node* node, Node* output) {
   switch (node->opcode()) {
@@ -102,7 +108,7 @@
   // In function calls, the simd128 types are passed as 4 Int32 types. The
   // parameters are typecast to the types as needed for various operations.
   int result = old_index;
-  for (int i = 0; i < old_index; i++) {
+  for (int i = 0; i < old_index; ++i) {
     if (signature->GetParam(i) == MachineRepresentation::kSimd128) {
       result += 3;
     }
@@ -123,7 +129,7 @@
 static int GetReturnCountAfterLowering(
     Signature<MachineRepresentation>* signature) {
   int result = static_cast<int>(signature->return_count());
-  for (int i = 0; i < static_cast<int>(signature->return_count()); i++) {
+  for (int i = 0; i < static_cast<int>(signature->return_count()); ++i) {
     if (signature->GetReturn(i) == MachineRepresentation::kSimd128) {
       result += 3;
     }
@@ -131,6 +137,100 @@
   return result;
 }
 
+void SimdScalarLowering::GetIndexNodes(Node* index, Node** new_indices) {
+  new_indices[0] = index;
+  for (size_t i = 1; i < kMaxLanes; ++i) {
+    new_indices[i] = graph()->NewNode(machine()->Int32Add(), index,
+                                      graph()->NewNode(common()->Int32Constant(
+                                          static_cast<int>(i) * kLaneWidth)));
+  }
+}
+
+void SimdScalarLowering::LowerLoadOp(MachineRepresentation rep, Node* node,
+                                     const Operator* load_op) {
+  if (rep == MachineRepresentation::kSimd128) {
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* indices[kMaxLanes];
+    GetIndexNodes(index, indices);
+    Node* rep_nodes[kMaxLanes];
+    rep_nodes[0] = node;
+    NodeProperties::ChangeOp(rep_nodes[0], load_op);
+    if (node->InputCount() > 2) {
+      DCHECK(node->InputCount() > 3);
+      Node* effect_input = node->InputAt(2);
+      Node* control_input = node->InputAt(3);
+      rep_nodes[3] = graph()->NewNode(load_op, base, indices[3], effect_input,
+                                      control_input);
+      rep_nodes[2] = graph()->NewNode(load_op, base, indices[2], rep_nodes[3],
+                                      control_input);
+      rep_nodes[1] = graph()->NewNode(load_op, base, indices[1], rep_nodes[2],
+                                      control_input);
+      rep_nodes[0]->ReplaceInput(2, rep_nodes[1]);
+    } else {
+      for (size_t i = 1; i < kMaxLanes; ++i) {
+        rep_nodes[i] = graph()->NewNode(load_op, base, indices[i]);
+      }
+    }
+    ReplaceNode(node, rep_nodes);
+  } else {
+    DefaultLowering(node);
+  }
+}
+
+void SimdScalarLowering::LowerStoreOp(MachineRepresentation rep, Node* node,
+                                      const Operator* store_op,
+                                      SimdType rep_type) {
+  if (rep == MachineRepresentation::kSimd128) {
+    Node* base = node->InputAt(0);
+    Node* index = node->InputAt(1);
+    Node* indices[kMaxLanes];
+    GetIndexNodes(index, indices);
+    DCHECK(node->InputCount() > 2);
+    Node* value = node->InputAt(2);
+    DCHECK(HasReplacement(1, value));
+    Node* rep_nodes[kMaxLanes];
+    rep_nodes[0] = node;
+    Node** rep_inputs = GetReplacementsWithType(value, rep_type);
+    rep_nodes[0]->ReplaceInput(2, rep_inputs[0]);
+    NodeProperties::ChangeOp(node, store_op);
+    if (node->InputCount() > 3) {
+      DCHECK(node->InputCount() > 4);
+      Node* effect_input = node->InputAt(3);
+      Node* control_input = node->InputAt(4);
+      rep_nodes[3] = graph()->NewNode(store_op, base, indices[3], rep_inputs[3],
+                                      effect_input, control_input);
+      rep_nodes[2] = graph()->NewNode(store_op, base, indices[2], rep_inputs[2],
+                                      rep_nodes[3], control_input);
+      rep_nodes[1] = graph()->NewNode(store_op, base, indices[1], rep_inputs[1],
+                                      rep_nodes[2], control_input);
+      rep_nodes[0]->ReplaceInput(3, rep_nodes[1]);
+
+    } else {
+      for (size_t i = 1; i < kMaxLanes; ++i) {
+        rep_nodes[i] =
+            graph()->NewNode(store_op, base, indices[i], rep_inputs[i]);
+      }
+    }
+
+    ReplaceNode(node, rep_nodes);
+  } else {
+    DefaultLowering(node);
+  }
+}
+
+void SimdScalarLowering::LowerBinaryOp(Node* node, SimdType rep_type,
+                                       const Operator* op) {
+  DCHECK(node->InputCount() == 2);
+  Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
+  Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
+  Node* rep_node[kMaxLanes];
+  for (int i = 0; i < kMaxLanes; ++i) {
+    rep_node[i] = graph()->NewNode(op, rep_left[i], rep_right[i]);
+  }
+  ReplaceNode(node, rep_node);
+}
+
 void SimdScalarLowering::LowerNode(Node* node) {
   SimdType rep_type = ReplacementType(node);
   switch (node->opcode()) {
@@ -159,13 +259,13 @@
           NodeProperties::ChangeOp(node, common()->Parameter(new_index));
 
           Node* new_node[kMaxLanes];
-          for (int i = 0; i < kMaxLanes; i++) {
+          for (int i = 0; i < kMaxLanes; ++i) {
             new_node[i] = nullptr;
           }
           new_node[0] = node;
           if (signature()->GetParam(old_index) ==
               MachineRepresentation::kSimd128) {
-            for (int i = 1; i < kMaxLanes; i++) {
+            for (int i = 1; i < kMaxLanes; ++i) {
               new_node[i] = graph()->NewNode(common()->Parameter(new_index + i),
                                              graph()->start());
             }
@@ -175,6 +275,57 @@
       }
       break;
     }
+    case IrOpcode::kLoad: {
+      MachineRepresentation rep =
+          LoadRepresentationOf(node->op()).representation();
+      const Operator* load_op;
+      if (rep_type == SimdType::kInt32) {
+        load_op = machine()->Load(MachineType::Int32());
+      } else if (rep_type == SimdType::kFloat32) {
+        load_op = machine()->Load(MachineType::Float32());
+      }
+      LowerLoadOp(rep, node, load_op);
+      break;
+    }
+    case IrOpcode::kUnalignedLoad: {
+      MachineRepresentation rep =
+          UnalignedLoadRepresentationOf(node->op()).representation();
+      const Operator* load_op;
+      if (rep_type == SimdType::kInt32) {
+        load_op = machine()->UnalignedLoad(MachineType::Int32());
+      } else if (rep_type == SimdType::kFloat32) {
+        load_op = machine()->UnalignedLoad(MachineType::Float32());
+      }
+      LowerLoadOp(rep, node, load_op);
+      break;
+    }
+    case IrOpcode::kStore: {
+      MachineRepresentation rep =
+          StoreRepresentationOf(node->op()).representation();
+      WriteBarrierKind write_barrier_kind =
+          StoreRepresentationOf(node->op()).write_barrier_kind();
+      const Operator* store_op;
+      if (rep_type == SimdType::kInt32) {
+        store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kWord32, write_barrier_kind));
+      } else {
+        store_op = machine()->Store(StoreRepresentation(
+            MachineRepresentation::kFloat32, write_barrier_kind));
+      }
+      LowerStoreOp(rep, node, store_op, rep_type);
+      break;
+    }
+    case IrOpcode::kUnalignedStore: {
+      MachineRepresentation rep = UnalignedStoreRepresentationOf(node->op());
+      const Operator* store_op;
+      if (rep_type == SimdType::kInt32) {
+        store_op = machine()->UnalignedStore(MachineRepresentation::kWord32);
+      } else {
+        store_op = machine()->UnalignedStore(MachineRepresentation::kFloat32);
+      }
+      LowerStoreOp(rep, node, store_op, rep_type);
+      break;
+    }
     case IrOpcode::kReturn: {
       DefaultLowering(node);
       int new_return_count = GetReturnCountAfterLowering(signature());
@@ -200,7 +351,7 @@
           descriptor->GetReturnType(0) == MachineType::Simd128()) {
         // We access the additional return values through projections.
         Node* rep_node[kMaxLanes];
-        for (int i = 0; i < kMaxLanes; i++) {
+        for (int i = 0; i < kMaxLanes; ++i) {
           rep_node[i] =
               graph()->NewNode(common()->Projection(i), node, graph()->start());
         }
@@ -214,7 +365,7 @@
         // The replacement nodes have already been created, we only have to
         // replace placeholder nodes.
         Node** rep_node = GetReplacements(node);
-        for (int i = 0; i < node->op()->ValueInputCount(); i++) {
+        for (int i = 0; i < node->op()->ValueInputCount(); ++i) {
           Node** rep_input =
               GetReplacementsWithType(node->InputAt(i), rep_type);
           for (int j = 0; j < kMaxLanes; j++) {
@@ -226,75 +377,51 @@
       }
       break;
     }
-
     case IrOpcode::kInt32x4Add: {
-      DCHECK(node->InputCount() == 2);
-      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
-      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        rep_node[i] =
-            graph()->NewNode(machine()->Int32Add(), rep_left[i], rep_right[i]);
-      }
-      ReplaceNode(node, rep_node);
+      LowerBinaryOp(node, rep_type, machine()->Int32Add());
       break;
     }
-
-    case IrOpcode::kCreateInt32x4: {
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        DCHECK(!HasReplacement(1, node->InputAt(i)));
-        rep_node[i] = node->InputAt(i);
-      }
-      ReplaceNode(node, rep_node);
-      break;
-    }
-
-    case IrOpcode::kInt32x4ExtractLane: {
-      Node* laneNode = node->InputAt(1);
-      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
-      int32_t lane = OpParameter<int32_t>(laneNode);
-      Node* rep_node[kMaxLanes] = {
-          GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
-          nullptr, nullptr};
-      ReplaceNode(node, rep_node);
-      break;
-    }
-
     case IrOpcode::kFloat32x4Add: {
-      DCHECK(node->InputCount() == 2);
-      Node** rep_left = GetReplacementsWithType(node->InputAt(0), rep_type);
-      Node** rep_right = GetReplacementsWithType(node->InputAt(1), rep_type);
-      Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        rep_node[i] = graph()->NewNode(machine()->Float32Add(), rep_left[i],
-                                       rep_right[i]);
-      }
-      ReplaceNode(node, rep_node);
+      LowerBinaryOp(node, rep_type, machine()->Float32Add());
       break;
     }
-
+    case IrOpcode::kCreateInt32x4:
     case IrOpcode::kCreateFloat32x4: {
       Node* rep_node[kMaxLanes];
-      for (int i = 0; i < kMaxLanes; i++) {
-        DCHECK(!HasReplacement(1, node->InputAt(i)));
-        rep_node[i] = node->InputAt(i);
+      for (int i = 0; i < kMaxLanes; ++i) {
+        if (HasReplacement(0, node->InputAt(i))) {
+          rep_node[i] = GetReplacements(node->InputAt(i))[0];
+        } else {
+          rep_node[i] = node->InputAt(i);
+        }
       }
       ReplaceNode(node, rep_node);
       break;
     }
-
+    case IrOpcode::kInt32x4ExtractLane:
     case IrOpcode::kFloat32x4ExtractLane: {
-      Node* laneNode = node->InputAt(1);
-      DCHECK_EQ(laneNode->opcode(), IrOpcode::kInt32Constant);
-      int32_t lane = OpParameter<int32_t>(laneNode);
+      int32_t lane = OpParameter<int32_t>(node);
       Node* rep_node[kMaxLanes] = {
           GetReplacementsWithType(node->InputAt(0), rep_type)[lane], nullptr,
           nullptr, nullptr};
       ReplaceNode(node, rep_node);
       break;
     }
-
+    case IrOpcode::kInt32x4ReplaceLane:
+    case IrOpcode::kFloat32x4ReplaceLane: {
+      DCHECK_EQ(2, node->InputCount());
+      Node* repNode = node->InputAt(1);
+      int32_t lane = OpParameter<int32_t>(node);
+      DCHECK(lane >= 0 && lane <= 3);
+      Node** rep_node = GetReplacementsWithType(node->InputAt(0), rep_type);
+      if (HasReplacement(0, repNode)) {
+        rep_node[lane] = GetReplacements(repNode)[0];
+      } else {
+        rep_node[lane] = repNode;
+      }
+      ReplaceNode(node, rep_node);
+      break;
+    }
     default: { DefaultLowering(node); }
   }
 }
@@ -322,7 +449,7 @@
   DCHECK(new_node[0] != nullptr ||
          (new_node[1] == nullptr && new_node[2] == nullptr &&
           new_node[3] == nullptr));
-  for (int i = 0; i < kMaxLanes; i++) {
+  for (int i = 0; i < kMaxLanes; ++i) {
     replacements_[old->id()].node[i] = new_node[i];
   }
 }
@@ -348,7 +475,7 @@
   }
   Node** result = zone()->NewArray<Node*>(kMaxLanes);
   if (ReplacementType(node) == SimdType::kInt32 && type == SimdType::kFloat32) {
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (replacements[i] != nullptr) {
         result[i] = graph()->NewNode(machine()->BitcastInt32ToFloat32(),
                                      replacements[i]);
@@ -357,7 +484,7 @@
       }
     }
   } else {
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (replacements[i] != nullptr) {
         result[i] = graph()->NewNode(machine()->BitcastFloat32ToInt32(),
                                      replacements[i]);
@@ -379,17 +506,17 @@
     int value_count = phi->op()->ValueInputCount();
     SimdType type = ReplacementType(phi);
     Node** inputs_rep[kMaxLanes];
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       inputs_rep[i] = zone()->NewArray<Node*>(value_count + 1);
       inputs_rep[i][value_count] = NodeProperties::GetControlInput(phi, 0);
     }
-    for (int i = 0; i < value_count; i++) {
+    for (int i = 0; i < value_count; ++i) {
       for (int j = 0; j < kMaxLanes; j++) {
         inputs_rep[j][i] = placeholder_;
       }
     }
     Node* rep_nodes[kMaxLanes];
-    for (int i = 0; i < kMaxLanes; i++) {
+    for (int i = 0; i < kMaxLanes; ++i) {
       if (type == SimdType::kInt32) {
         rep_nodes[i] = graph()->NewNode(
             common()->Phi(MachineRepresentation::kWord32, value_count),
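
The new LowerBinaryOp/ReplaceLane paths above turn each simd128 value
into kMaxLanes scalar nodes. Written out by hand for Int32x4 (plain C++
illustration, not the compiler's node graph):

    #include <cstdint>
    #include <cstdio>

    constexpr int kMaxLanes = 4;

    // Four independent word32 values stand in for one simd128 node.
    struct Int32x4Lowered {
      int32_t lane[kMaxLanes];
    };

    // Int32x4Add lowers to kMaxLanes scalar Int32Add nodes.
    Int32x4Lowered Add(const Int32x4Lowered& a, const Int32x4Lowered& b) {
      Int32x4Lowered r;
      for (int i = 0; i < kMaxLanes; ++i) r.lane[i] = a.lane[i] + b.lane[i];
      return r;
    }

    // Int32x4ReplaceLane swaps out exactly one replacement node.
    Int32x4Lowered ReplaceLane(Int32x4Lowered v, int lane, int32_t value) {
      v.lane[lane] = value;
      return v;
    }

    int main() {
      Int32x4Lowered a = {{1, 2, 3, 4}};
      Int32x4Lowered b = {{10, 20, 30, 40}};
      Int32x4Lowered sum = ReplaceLane(Add(a, b), 2, -1);
      // Int32x4ExtractLane is just a read of one replacement node.
      std::printf("%d %d %d %d\n", sum.lane[0], sum.lane[1], sum.lane[2],
                  sum.lane[3]);  // 11 22 -1 44
      return 0;
    }
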
diff --git a/src/compiler/simd-scalar-lowering.h b/src/compiler/simd-scalar-lowering.h
index 39449f4..c795c6b 100644
--- a/src/compiler/simd-scalar-lowering.h
+++ b/src/compiler/simd-scalar-lowering.h
@@ -31,6 +31,7 @@
   enum class SimdType : uint8_t { kInt32, kFloat32 };
 
   static const int kMaxLanes = 4;
+  static const int kLaneWidth = 16 / kMaxLanes;
 
   struct Replacement {
     Node* node[kMaxLanes];
@@ -53,6 +54,12 @@
   SimdType ReplacementType(Node* node);
   void PreparePhiReplacement(Node* phi);
   void SetLoweredType(Node* node, Node* output);
+  void GetIndexNodes(Node* index, Node** new_indices);
+  void LowerLoadOp(MachineRepresentation rep, Node* node,
+                   const Operator* load_op);
+  void LowerStoreOp(MachineRepresentation rep, Node* node,
+                    const Operator* store_op, SimdType rep_type);
+  void LowerBinaryOp(Node* node, SimdType rep_type, const Operator* op);
 
   struct NodeState {
     Node* node;
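
kLaneWidth pins the per-lane stride to 16 / kMaxLanes = 4 bytes, and
GetIndexNodes derives one index per lane from the base index. The
equivalent address arithmetic, sketched with a hypothetical base index:

    #include <cstdio>

    constexpr int kMaxLanes = 4;
    constexpr int kLaneWidth = 16 / kMaxLanes;  // 4 bytes per lane

    int main() {
      int base_index = 32;  // hypothetical byte index of a simd128 value
      for (int i = 0; i < kMaxLanes; ++i) {
        // GetIndexNodes builds Int32Add(index, i * kLaneWidth) per lane.
        std::printf("lane %d -> index %d\n", i,
                    base_index + i * kLaneWidth);
      }
      // lane 0 -> 32, lane 1 -> 36, lane 2 -> 40, lane 3 -> 44
      return 0;
    }
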
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index c90d743..4acc77f 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -138,7 +138,10 @@
       return UseInfo::TruncatingWord32();
     case MachineRepresentation::kBit:
       return UseInfo::Bool();
-    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd128:
+    case MachineRepresentation::kSimd1x4:
+    case MachineRepresentation::kSimd1x8:
+    case MachineRepresentation::kSimd1x16:
     case MachineRepresentation::kNone:
       break;
   }
@@ -170,6 +173,7 @@
 }
 
 void ChangeToPureOp(Node* node, const Operator* new_op) {
+  DCHECK(new_op->HasProperty(Operator::kPure));
   if (node->op()->EffectInputCount() > 0) {
     DCHECK_LT(0, node->op()->ControlInputCount());
     // Disconnect the node from effect and control chains.
@@ -209,8 +213,30 @@
 
 #endif  // DEBUG
 
-}  // namespace
+bool CanOverflowSigned32(const Operator* op, Type* left, Type* right,
+                         Zone* type_zone) {
+  // We assume the inputs are checked Signed32 (or known statically
+  // to be Signed32). Technically, the inputs could also be minus zero, but
+  // that cannot cause overflow.
+  left = Type::Intersect(left, Type::Signed32(), type_zone);
+  right = Type::Intersect(right, Type::Signed32(), type_zone);
+  if (!left->IsInhabited() || !right->IsInhabited()) return false;
+  switch (op->opcode()) {
+    case IrOpcode::kSpeculativeNumberAdd:
+      return (left->Max() + right->Max() > kMaxInt) ||
+             (left->Min() + right->Min() < kMinInt);
 
+    case IrOpcode::kSpeculativeNumberSubtract:
+      return (left->Max() - right->Min() > kMaxInt) ||
+             (left->Min() - right->Max() < kMinInt);
+
+    default:
+      UNREACHABLE();
+  }
+  return true;
+}
+
+}  // namespace
 
 class RepresentationSelector {
  public:
@@ -675,6 +701,11 @@
            GetUpperBound(node->InputAt(1))->Is(type);
   }
 
+  bool IsNodeRepresentationTagged(Node* node) {
+    MachineRepresentation representation = GetInfo(node)->representation();
+    return IsAnyTagged(representation);
+  }
+
   bool OneInputCannotBe(Node* node, Type* type) {
     DCHECK_EQ(2, node->op()->ValueInputCount());
     return !GetUpperBound(node->InputAt(0))->Maybe(type) ||
@@ -867,6 +898,7 @@
   // Helper for handling selects.
   void VisitSelect(Node* node, Truncation truncation,
                    SimplifiedLowering* lowering) {
+    DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
     ProcessInput(node, 0, UseInfo::Bool());
 
     MachineRepresentation output =
@@ -953,7 +985,7 @@
     }
   }
 
-  MachineSemantic DeoptValueSemanticOf(Type* type) {
+  static MachineSemantic DeoptValueSemanticOf(Type* type) {
     // We only need signedness to do deopt correctly.
     if (type->Is(Type::Signed32())) {
       return MachineSemantic::kInt32;
@@ -964,6 +996,29 @@
     }
   }
 
+  static MachineType DeoptMachineTypeOf(MachineRepresentation rep, Type* type) {
+    if (!type->IsInhabited()) {
+      return MachineType::None();
+    }
+    // TODO(turbofan): Special treatment for ExternalPointer here,
+    // to avoid incompatible truncations. We really need a story
+    // for the JSFunction::entry field.
+    if (type->Is(Type::ExternalPointer())) {
+      return MachineType::Pointer();
+    }
+    // Do not distinguish between various Tagged variations.
+    if (IsAnyTagged(rep)) {
+      return MachineType::AnyTagged();
+    }
+    MachineType machine_type(rep, DeoptValueSemanticOf(type));
+    DCHECK(machine_type.representation() != MachineRepresentation::kWord32 ||
+           machine_type.semantic() == MachineSemantic::kInt32 ||
+           machine_type.semantic() == MachineSemantic::kUint32);
+    DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
+           type->Is(Type::Boolean()));
+    return machine_type;
+  }
+
   void VisitStateValues(Node* node) {
     if (propagate()) {
       for (int i = 0; i < node->InputCount(); i++) {
@@ -976,20 +1031,12 @@
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
-        NodeInfo* input_info = GetInfo(input);
-        Type* input_type = TypeOf(input);
-        MachineRepresentation rep = input_type->IsInhabited()
-                                        ? input_info->representation()
-                                        : MachineRepresentation::kNone;
-        MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
-        DCHECK(machine_type.representation() !=
-                   MachineRepresentation::kWord32 ||
-               machine_type.semantic() == MachineSemantic::kInt32 ||
-               machine_type.semantic() == MachineSemantic::kUint32);
-        (*types)[i] = machine_type;
+        (*types)[i] =
+            DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
       }
-      NodeProperties::ChangeOp(node,
-                               jsgraph_->common()->TypedStateValues(types));
+      SparseInputMask mask = SparseInputMaskOf(node->op());
+      NodeProperties::ChangeOp(
+          node, jsgraph_->common()->TypedStateValues(types, mask));
     }
     SetOutput(node, MachineRepresentation::kTagged);
   }
@@ -1002,9 +1049,14 @@
         // TODO(turbofan): Special treatment for ExternalPointer here,
         // to avoid incompatible truncations. We really need a story
         // for the JSFunction::entry field.
-        UseInfo use_info = input_type->Is(Type::ExternalPointer())
-                               ? UseInfo::PointerInt()
-                               : UseInfo::Any();
+        UseInfo use_info = UseInfo::None();
+        if (input_type->IsInhabited()) {
+          if (input_type->Is(Type::ExternalPointer())) {
+            use_info = UseInfo::PointerInt();
+          } else {
+            use_info = UseInfo::Any();
+          }
+        }
         EnqueueInput(node, i, use_info);
       }
     } else if (lower()) {
@@ -1014,26 +1066,8 @@
               ZoneVector<MachineType>(node->InputCount(), zone);
       for (int i = 0; i < node->InputCount(); i++) {
         Node* input = node->InputAt(i);
-        NodeInfo* input_info = GetInfo(input);
-        Type* input_type = TypeOf(input);
-        // TODO(turbofan): Special treatment for ExternalPointer here,
-        // to avoid incompatible truncations. We really need a story
-        // for the JSFunction::entry field.
-        if (input_type->Is(Type::ExternalPointer())) {
-          (*types)[i] = MachineType::Pointer();
-        } else {
-          MachineRepresentation rep = input_type->IsInhabited()
-                                          ? input_info->representation()
-                                          : MachineRepresentation::kNone;
-          MachineType machine_type(rep, DeoptValueSemanticOf(input_type));
-          DCHECK(machine_type.representation() !=
-                     MachineRepresentation::kWord32 ||
-                 machine_type.semantic() == MachineSemantic::kInt32 ||
-                 machine_type.semantic() == MachineSemantic::kUint32);
-          DCHECK(machine_type.representation() != MachineRepresentation::kBit ||
-                 input_type->Is(Type::Boolean()));
-          (*types)[i] = machine_type;
-        }
+        (*types)[i] =
+            DeoptMachineTypeOf(GetInfo(input)->representation(), TypeOf(input));
       }
       NodeProperties::ChangeOp(node,
                                jsgraph_->common()->TypedObjectState(types));
@@ -1080,17 +1114,14 @@
         return kNoWriteBarrier;
       }
       if (value_type->IsHeapConstant()) {
-        Handle<HeapObject> value_object = value_type->AsHeapConstant()->Value();
-        RootIndexMap root_index_map(jsgraph_->isolate());
-        int root_index = root_index_map.Lookup(*value_object);
-        if (root_index != RootIndexMap::kInvalidRootIndex &&
-            jsgraph_->isolate()->heap()->RootIsImmortalImmovable(root_index)) {
-          // Write barriers are unnecessary for immortal immovable roots.
-          return kNoWriteBarrier;
-        }
-        if (value_object->IsMap()) {
-          // Write barriers for storing maps are cheaper.
-          return kMapWriteBarrier;
+        Heap::RootListIndex root_index;
+        Heap* heap = jsgraph_->isolate()->heap();
+        if (heap->IsRootHandle(value_type->AsHeapConstant()->Value(),
+                               &root_index)) {
+          if (heap->RootIsImmortalImmovable(root_index)) {
+            // Write barriers are unnecessary for immortal immovable roots.
+            return kNoWriteBarrier;
+          }
         }
       }
       if (field_representation == MachineRepresentation::kTaggedPointer ||
@@ -1160,10 +1191,14 @@
     // ToNumber(x) can throw if x is either a Receiver or a Symbol, so we can
     // only eliminate an unused speculative number operation if we know that
     // the inputs are PlainPrimitive, which excludes everything that might
-    // have side effects or throws during a ToNumber conversion.
-    if (BothInputsAre(node, Type::PlainPrimitive())) {
+    // have side effects or throws during a ToNumber conversion. We are only
+    // allowed to perform a number addition if neither input is a String, even
+    // if the value is never used, so we further limit to NumberOrOddball in
+    // order to explicitly exclude String inputs.
+    if (BothInputsAre(node, Type::NumberOrOddball())) {
       if (truncation.IsUnused()) return VisitUnused(node);
     }
+
     if (BothInputsAre(node, type_cache_.kAdditiveSafeIntegerOrMinusZero) &&
         (GetUpperBound(node)->Is(Type::Signed32()) ||
          GetUpperBound(node)->Is(Type::Unsigned32()) ||
@@ -1177,33 +1212,38 @@
     // Try to use type feedback.
     NumberOperationHint hint = NumberOperationHintOf(node->op());
 
-    // Handle the case when no int32 checks on inputs are necessary
-    // (but an overflow check is needed on the output).
-    if (BothInputsAre(node, Type::Signed32()) ||
-        (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
-         NodeProperties::GetType(node)->Is(type_cache_.kSafeInteger))) {
-      // If both the inputs the feedback are int32, use the overflow op.
-      if (hint == NumberOperationHint::kSignedSmall ||
-          hint == NumberOperationHint::kSigned32) {
-        VisitBinop(node, UseInfo::TruncatingWord32(),
-                   MachineRepresentation::kWord32, Type::Signed32());
-        if (lower()) ChangeToInt32OverflowOp(node);
-        return;
-      }
-    }
-
     if (hint == NumberOperationHint::kSignedSmall ||
         hint == NumberOperationHint::kSigned32) {
-      UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
-      // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
-      // a minus zero check for the right hand side, since we already
-      // know that the left hand side is a proper Signed32 value,
-      // potentially guarded by a check.
-      UseInfo right_use = CheckedUseInfoAsWord32FromHint(
-          hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
-      VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
-                 Type::Signed32());
-      if (lower()) ChangeToInt32OverflowOp(node);
+      Type* left_feedback_type = TypeOf(node->InputAt(0));
+      Type* right_feedback_type = TypeOf(node->InputAt(1));
+      // Handle the case when no int32 checks on inputs are necessary (but
+      // an overflow check is needed on the output).
+      // TODO(jarin) We should not look at the upper bound because the typer
+      // could have already baked in some feedback into the upper bound.
+      if (BothInputsAre(node, Type::Signed32()) ||
+          (BothInputsAre(node, Type::Signed32OrMinusZero()) &&
+           GetUpperBound(node)->Is(type_cache_.kSafeInteger))) {
+        VisitBinop(node, UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kWord32, Type::Signed32());
+      } else {
+        UseInfo left_use = CheckedUseInfoAsWord32FromHint(hint);
+        // For CheckedInt32Add and CheckedInt32Sub, we don't need to do
+        // a minus zero check for the right hand side, since we already
+        // know that the left hand side is a proper Signed32 value,
+        // potentially guarded by a check.
+        UseInfo right_use = CheckedUseInfoAsWord32FromHint(
+            hint, CheckForMinusZeroMode::kDontCheckForMinusZero);
+        VisitBinop(node, left_use, right_use, MachineRepresentation::kWord32,
+                   Type::Signed32());
+      }
+      if (lower()) {
+        if (CanOverflowSigned32(node->op(), left_feedback_type,
+                                right_feedback_type, graph_zone())) {
+          ChangeToInt32OverflowOp(node);
+        } else {
+          ChangeToPureOp(node, Int32Op(node));
+        }
+      }
       return;
     }
 
@@ -1392,10 +1432,12 @@
         return;
       }
 
-      case IrOpcode::kBranch:
+      case IrOpcode::kBranch: {
+        DCHECK(TypeOf(node->InputAt(0))->Is(Type::Boolean()));
         ProcessInput(node, 0, UseInfo::Bool());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
         return;
+      }
       case IrOpcode::kSwitch:
         ProcessInput(node, 0, UseInfo::TruncatingWord32());
         EnqueueInput(node, NodeProperties::FirstControlIndex(node));
@@ -1550,13 +1592,38 @@
         NumberOperationHint hint = NumberOperationHintOf(node->op());
         switch (hint) {
           case NumberOperationHint::kSignedSmall:
-          case NumberOperationHint::kSigned32:
-            VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
-                       MachineRepresentation::kBit);
-            if (lower()) ChangeToPureOp(node, Int32Op(node));
+          case NumberOperationHint::kSigned32: {
+            if (propagate()) {
+              VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                         MachineRepresentation::kBit);
+            } else if (retype()) {
+              SetOutput(node, MachineRepresentation::kBit, Type::Any());
+            } else {
+              DCHECK(lower());
+              Node* lhs = node->InputAt(0);
+              Node* rhs = node->InputAt(1);
+              if (IsNodeRepresentationTagged(lhs) &&
+                  IsNodeRepresentationTagged(rhs)) {
+                VisitBinop(node, UseInfo::CheckedSignedSmallAsTaggedSigned(),
+                           MachineRepresentation::kBit);
+                ChangeToPureOp(
+                    node, changer_->TaggedSignedOperatorFor(node->opcode()));
+
+              } else {
+                VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                           MachineRepresentation::kBit);
+                ChangeToPureOp(node, Int32Op(node));
+              }
+            }
             return;
-          case NumberOperationHint::kNumber:
+          }
           case NumberOperationHint::kNumberOrOddball:
+            // Abstract and strict equality don't perform ToNumber conversions
+            // on Oddballs, so make sure we don't accidentally sneak in a
+            // hint with Oddball feedback here.
+            DCHECK_NE(IrOpcode::kSpeculativeNumberEqual, node->opcode());
+          // Fallthrough
+          case NumberOperationHint::kNumber:
             VisitBinop(node, CheckedUseInfoAsFloat64FromHint(hint),
                        MachineRepresentation::kBit);
             if (lower()) ChangeToPureOp(node, Float64Op(node));
@@ -1919,8 +1986,26 @@
         if (BothInputsAre(node, Type::PlainPrimitive())) {
           if (truncation.IsUnused()) return VisitUnused(node);
         }
+        NumberOperationHint hint = NumberOperationHintOf(node->op());
+        Type* rhs_type = GetUpperBound(node->InputAt(1));
+        if (rhs_type->Is(type_cache_.kZeroish) &&
+            (hint == NumberOperationHint::kSignedSmall ||
+             hint == NumberOperationHint::kSigned32) &&
+            !truncation.IsUsedAsWord32()) {
+          // The SignedSmall or Signed32 feedback means that the results that we
+          // have seen so far were of type Unsigned31.  We speculate that this
+          // will continue to hold.  Moreover, since the RHS is 0, the result
+          // will just be the (converted) LHS.
+          VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
+                     MachineRepresentation::kWord32, Type::Unsigned31());
+          if (lower()) {
+            node->RemoveInput(1);
+            NodeProperties::ChangeOp(node,
+                                     simplified()->CheckedUint32ToInt32());
+          }
+          return;
+        }
         if (BothInputsAre(node, Type::NumberOrOddball())) {
-          Type* rhs_type = GetUpperBound(node->InputAt(1));
           VisitBinop(node, UseInfo::TruncatingWord32(),
                      UseInfo::TruncatingWord32(),
                      MachineRepresentation::kWord32);
@@ -1929,8 +2014,6 @@
           }
           return;
         }
-        NumberOperationHint hint = NumberOperationHintOf(node->op());
-        Type* rhs_type = GetUpperBound(node->InputAt(1));
         VisitBinop(node, CheckedUseInfoAsWord32FromHint(hint),
                    MachineRepresentation::kWord32, Type::Unsigned32());
         if (lower()) {
@@ -2156,9 +2239,15 @@
         return VisitBinop(node, UseInfo::AnyTagged(),
                           MachineRepresentation::kTaggedPointer);
       }
-      case IrOpcode::kStringCharCodeAt: {
+      case IrOpcode::kStringCharAt: {
         VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
-                   MachineRepresentation::kWord32);
+                   MachineRepresentation::kTaggedPointer);
+        return;
+      }
+      case IrOpcode::kStringCharCodeAt: {
+        // TODO(turbofan): Allow builtins to return untagged values.
+        VisitBinop(node, UseInfo::AnyTagged(), UseInfo::TruncatingWord32(),
+                   MachineRepresentation::kTaggedSigned);
         return;
       }
       case IrOpcode::kStringFromCharCode: {
@@ -2171,6 +2260,13 @@
                   MachineRepresentation::kTaggedPointer);
         return;
       }
+      case IrOpcode::kStringIndexOf: {
+        ProcessInput(node, 0, UseInfo::AnyTagged());
+        ProcessInput(node, 1, UseInfo::AnyTagged());
+        ProcessInput(node, 2, UseInfo::TaggedSigned());
+        SetOutput(node, MachineRepresentation::kTaggedSigned);
+        return;
+      }
 
       case IrOpcode::kCheckBounds: {
         Type* index_type = TypeOf(node->InputAt(0));
@@ -2207,6 +2303,17 @@
         SetOutput(node, MachineRepresentation::kNone);
         return;
       }
+      case IrOpcode::kCheckInternalizedString: {
+        if (InputIs(node, Type::InternalizedString())) {
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+                    MachineRepresentation::kTaggedPointer);
+        }
+        return;
+      }
       case IrOpcode::kCheckNumber: {
         if (InputIs(node, Type::Number())) {
           if (truncation.IsUsedAsWord32()) {
@@ -2226,6 +2333,17 @@
         }
         return;
       }
+      case IrOpcode::kCheckReceiver: {
+        if (InputIs(node, Type::Receiver())) {
+          VisitUnop(node, UseInfo::AnyTagged(),
+                    MachineRepresentation::kTaggedPointer);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else {
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
+                    MachineRepresentation::kTaggedPointer);
+        }
+        return;
+      }
       case IrOpcode::kCheckSmi: {
         if (SmiValuesAre32Bits() && truncation.IsUsedAsWord32()) {
           VisitUnop(node, UseInfo::CheckedSignedSmallAsWord32(),
@@ -2243,7 +2361,7 @@
                     MachineRepresentation::kTaggedPointer);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
-          VisitUnop(node, UseInfo::AnyTagged(),
+          VisitUnop(node, UseInfo::CheckedHeapObjectAsTaggedPointer(),
                     MachineRepresentation::kTaggedPointer);
         }
         return;
@@ -2423,9 +2541,12 @@
         }
         return;
       }
-      case IrOpcode::kObjectIsCallable: {
-        // TODO(turbofan): Add Type::Callable to optimize this?
-        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
+      case IrOpcode::kObjectIsDetectableCallable: {
+        VisitObjectIs(node, Type::DetectableCallable(), lowering);
+        return;
+      }
+      case IrOpcode::kObjectIsNonCallable: {
+        VisitObjectIs(node, Type::NonCallable(), lowering);
         return;
       }
       case IrOpcode::kObjectIsNumber: {
@@ -2449,12 +2570,17 @@
         VisitObjectIs(node, Type::Undetectable(), lowering);
         return;
       }
+      case IrOpcode::kNewRestParameterElements:
+      case IrOpcode::kNewUnmappedArgumentsElements: {
+        ProcessRemainingInputs(node, 0);
+        SetOutput(node, MachineRepresentation::kTaggedPointer);
+        return;
+      }
       case IrOpcode::kArrayBufferWasNeutered: {
         VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kBit);
         return;
       }
       case IrOpcode::kCheckFloat64Hole: {
-        if (truncation.IsUnused()) return VisitUnused(node);
         CheckFloat64HoleMode mode = CheckFloat64HoleModeOf(node->op());
         ProcessInput(node, 0, UseInfo::TruncatingFloat64());
         ProcessRemainingInputs(node, 1);
@@ -2466,8 +2592,7 @@
         return;
       }
       case IrOpcode::kCheckTaggedHole: {
-        VisitUnop(node, UseInfo::AnyTagged(),
-                  MachineRepresentation::kTaggedPointer);
+        VisitUnop(node, UseInfo::AnyTagged(), MachineRepresentation::kTagged);
         return;
       }
       case IrOpcode::kConvertTaggedHoleToUndefined: {
@@ -2562,6 +2687,7 @@
       case IrOpcode::kBeginRegion:
       case IrOpcode::kProjection:
       case IrOpcode::kOsrValue:
+      case IrOpcode::kArgumentsObjectState:
 // All JavaScript operators except JSToNumber have uniform handling.
 #define OPCODE_CASE(name) case IrOpcode::k##name:
         JS_SIMPLE_BINOP_LIST(OPCODE_CASE)
@@ -3276,12 +3402,11 @@
 
 void SimplifiedLowering::DoShift(Node* node, Operator const* op,
                                  Type* rhs_type) {
-  Node* const rhs = NodeProperties::GetValueInput(node, 1);
   if (!rhs_type->Is(type_cache_.kZeroToThirtyOne)) {
+    Node* const rhs = NodeProperties::GetValueInput(node, 1);
     node->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rhs,
                                            jsgraph()->Int32Constant(0x1f)));
   }
-  DCHECK(op->HasProperty(Operator::kPure));
   ChangeToPureOp(node, op);
 }
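
CanOverflowSigned32 is what lets the speculative add/subtract lowering
drop the overflow check: when the extreme sums of the Signed32-clamped
input ranges stay inside int32, ChangeToPureOp replaces the checked
operator. The same range test, standalone, with int64_t standing in for
the typer's double-valued Min()/Max() bounds:

    #include <cstdint>
    #include <cstdio>

    constexpr int64_t kMinInt = INT32_MIN;
    constexpr int64_t kMaxInt = INT32_MAX;

    struct Range {
      int64_t min, max;  // assumed already intersected with Signed32
    };

    bool CanAddOverflow(Range left, Range right) {
      return left.max + right.max > kMaxInt ||
             left.min + right.min < kMinInt;
    }

    bool CanSubtractOverflow(Range left, Range right) {
      return left.max - right.min > kMaxInt ||
             left.min - right.max < kMinInt;
    }

    int main() {
      Range small = {0, 1000};
      Range any = {kMinInt, kMaxInt};
      std::printf("small+small: %d\n", CanAddOverflow(small, small));  // 0
      std::printf("small+any:   %d\n", CanAddOverflow(small, any));    // 1
      std::printf("small-any:   %d\n", CanSubtractOverflow(small, any));
      return 0;
    }
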
 
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
index b8a486d..dcfb485 100644
--- a/src/compiler/simplified-operator-reducer.cc
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -129,6 +129,15 @@
       }
       break;
     }
+    case IrOpcode::kCheckedFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue() && IsInt32Double(m.Value())) {
+        Node* value = jsgraph()->Int32Constant(static_cast<int32_t>(m.Value()));
+        ReplaceWithValue(node, value);
+        return Replace(value);
+      }
+      break;
+    }
     case IrOpcode::kCheckedTaggedToInt32:
     case IrOpcode::kCheckedTaggedSignedToInt32: {
       NodeMatcher m(node->InputAt(0));
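
The CheckedFloat64ToInt32 reduction above only fires when IsInt32Double
accepts the constant. A plausible stand-in for that predicate, assuming
it follows the usual "integral, in int32 range, and not -0.0" rule
(minus zero must survive as a heap number, so it may not be folded):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    bool IsInt32Double(double value) {
      if (value == 0.0 && std::signbit(value)) return false;  // -0.0
      return value >= INT32_MIN && value <= INT32_MAX &&
             std::floor(value) == value;
    }

    int main() {
      const double samples[] = {1.0, 1.5, -0.0, 4e9};
      for (double d : samples) {
        std::printf("%g -> %d\n", d, IsInt32Double(d));
      }
      // Only 1.0 qualifies, so only that input folds to Int32Constant.
      return 0;
    }
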
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
index 345a2c5..90a4e34 100644
--- a/src/compiler/simplified-operator.cc
+++ b/src/compiler/simplified-operator.cc
@@ -92,6 +92,7 @@
   // really only relevant for eliminating loads and they don't care about the
   // write barrier mode.
   return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
+         lhs.map.address() == rhs.map.address() &&
          lhs.machine_type == rhs.machine_type;
 }
 
@@ -118,6 +119,10 @@
     name->Print(os);
     os << ", ";
   }
+  Handle<Map> map;
+  if (access.map.ToHandle(&map)) {
+    os << Brief(*map) << ", ";
+  }
 #endif
   access.type->PrintTo(os);
   os << ", " << access.machine_type << ", " << access.write_barrier_kind << "]";
@@ -229,6 +234,44 @@
   return os;
 }
 
+std::ostream& operator<<(std::ostream& os, CheckMapsFlags flags) {
+  bool empty = true;
+  if (flags & CheckMapsFlag::kTryMigrateInstance) {
+    os << "TryMigrateInstance";
+    empty = false;
+  }
+  if (empty) os << "None";
+  return os;
+}
+
+bool operator==(CheckMapsParameters const& lhs,
+                CheckMapsParameters const& rhs) {
+  return lhs.flags() == rhs.flags() && lhs.maps() == rhs.maps();
+}
+
+bool operator!=(CheckMapsParameters const& lhs,
+                CheckMapsParameters const& rhs) {
+  return !(lhs == rhs);
+}
+
+size_t hash_value(CheckMapsParameters const& p) {
+  return base::hash_combine(p.flags(), p.maps());
+}
+
+std::ostream& operator<<(std::ostream& os, CheckMapsParameters const& p) {
+  ZoneHandleSet<Map> const& maps = p.maps();
+  os << p.flags();
+  for (size_t i = 0; i < maps.size(); ++i) {
+    os << ", " << Brief(*maps[i]);
+  }
+  return os;
+}
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckMaps, op->opcode());
+  return OpParameter<CheckMapsParameters>(op);
+}
+
 size_t hash_value(CheckTaggedInputMode mode) {
   return static_cast<size_t>(mode);
 }
@@ -274,22 +317,36 @@
   return OpParameter<GrowFastElementsFlags>(op);
 }
 
+bool operator==(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+  return lhs.mode() == rhs.mode() &&
+         lhs.source().address() == rhs.source().address() &&
+         lhs.target().address() == rhs.target().address();
+}
+
+bool operator!=(ElementsTransition const& lhs, ElementsTransition const& rhs) {
+  return !(lhs == rhs);
+}
+
 size_t hash_value(ElementsTransition transition) {
-  return static_cast<uint8_t>(transition);
+  return base::hash_combine(static_cast<uint8_t>(transition.mode()),
+                            transition.source().address(),
+                            transition.target().address());
 }
 
 std::ostream& operator<<(std::ostream& os, ElementsTransition transition) {
-  switch (transition) {
+  switch (transition.mode()) {
     case ElementsTransition::kFastTransition:
-      return os << "fast-transition";
+      return os << "fast-transition from " << Brief(*transition.source())
+                << " to " << Brief(*transition.target());
     case ElementsTransition::kSlowTransition:
-      return os << "slow-transition";
+      return os << "slow-transition from " << Brief(*transition.source())
+                << " to " << Brief(*transition.target());
   }
   UNREACHABLE();
   return os;
 }
 
-ElementsTransition ElementsTransitionOf(const Operator* op) {
+ElementsTransition const& ElementsTransitionOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kTransitionElementsKind, op->opcode());
   return OpParameter<ElementsTransition>(op);
 }
@@ -331,6 +388,12 @@
   return OpParameter<NumberOperationHint>(op);
 }
 
+int ParameterCountOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kNewUnmappedArgumentsElements ||
+         op->opcode() == IrOpcode::kNewRestParameterElements);
+  return OpParameter<int>(op);
+}
+
 PretenureFlag PretenureFlagOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
   return OpParameter<PretenureFlag>(op);
@@ -395,8 +458,10 @@
   V(NumberToUint32, Operator::kNoProperties, 1, 0)               \
   V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0)         \
   V(NumberSilenceNaN, Operator::kNoProperties, 1, 0)             \
+  V(StringCharAt, Operator::kNoProperties, 2, 1)                 \
   V(StringCharCodeAt, Operator::kNoProperties, 2, 1)             \
   V(StringFromCharCode, Operator::kNoProperties, 1, 0)           \
+  V(StringIndexOf, Operator::kNoProperties, 3, 0)                \
   V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0)       \
   V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0)       \
   V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0)      \
@@ -404,6 +469,7 @@
   V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0)          \
   V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0)         \
   V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0)        \
+  V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0)   \
   V(ChangeFloat64ToTagged, Operator::kNoProperties, 1, 0)        \
   V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
   V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0)    \
@@ -414,7 +480,8 @@
   V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0)          \
   V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0)       \
   V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0)      \
-  V(ObjectIsCallable, Operator::kNoProperties, 1, 0)             \
+  V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0)   \
+  V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0)          \
   V(ObjectIsNumber, Operator::kNoProperties, 1, 0)               \
   V(ObjectIsReceiver, Operator::kNoProperties, 1, 0)             \
   V(ObjectIsSmi, Operator::kNoProperties, 1, 0)                  \
@@ -436,7 +503,9 @@
   V(CheckBounds, 2, 1)                  \
   V(CheckHeapObject, 1, 1)              \
   V(CheckIf, 1, 0)                      \
+  V(CheckInternalizedString, 1, 1)      \
   V(CheckNumber, 1, 1)                  \
+  V(CheckReceiver, 1, 1)                \
   V(CheckSmi, 1, 1)                     \
   V(CheckString, 1, 1)                  \
   V(CheckTaggedHole, 1, 1)              \
@@ -689,16 +758,15 @@
   return nullptr;
 }
 
-const Operator* SimplifiedOperatorBuilder::CheckMaps(int map_input_count) {
-  // TODO(bmeurer): Cache the most important versions of this operator.
-  DCHECK_LT(0, map_input_count);
-  int const value_input_count = 1 + map_input_count;
-  return new (zone()) Operator1<int>(           // --
-      IrOpcode::kCheckMaps,                     // opcode
-      Operator::kNoThrow | Operator::kNoWrite,  // flags
-      "CheckMaps",                              // name
-      value_input_count, 1, 1, 0, 1, 0,         // counts
-      map_input_count);                         // parameter
+const Operator* SimplifiedOperatorBuilder::CheckMaps(CheckMapsFlags flags,
+                                                     ZoneHandleSet<Map> maps) {
+  CheckMapsParameters const parameters(flags, maps);
+  return new (zone()) Operator1<CheckMapsParameters>(  // --
+      IrOpcode::kCheckMaps,                            // opcode
+      Operator::kNoThrow | Operator::kNoWrite,         // flags
+      "CheckMaps",                                     // name
+      1, 1, 1, 0, 1, 0,                                // counts
+      parameters);                                     // parameter
 }
 
 const Operator* SimplifiedOperatorBuilder::CheckFloat64Hole(
@@ -733,10 +801,30 @@
       IrOpcode::kTransitionElementsKind,              // opcode
       Operator::kNoDeopt | Operator::kNoThrow,        // flags
       "TransitionElementsKind",                       // name
-      3, 1, 1, 0, 1, 0,                               // counts
+      1, 1, 1, 0, 1, 0,                               // counts
       transition);                                    // parameter
 }
 
+const Operator* SimplifiedOperatorBuilder::NewUnmappedArgumentsElements(
+    int parameter_count) {
+  return new (zone()) Operator1<int>(           // --
+      IrOpcode::kNewUnmappedArgumentsElements,  // opcode
+      Operator::kEliminatable,                  // flags
+      "NewUnmappedArgumentsElements",           // name
+      0, 1, 0, 1, 1, 0,                         // counts
+      parameter_count);                         // parameter
+}
+
+const Operator* SimplifiedOperatorBuilder::NewRestParameterElements(
+    int parameter_count) {
+  return new (zone()) Operator1<int>(       // --
+      IrOpcode::kNewRestParameterElements,  // opcode
+      Operator::kEliminatable,              // flags
+      "NewRestParameterElements",           // name
+      0, 1, 0, 1, 1, 0,                     // counts
+      parameter_count);                     // parameter
+}
+
 const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
   switch (pretenure) {
     case NOT_TENURED:
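
The shape of CheckMaps changes in this file: the maps move out of the node's
value inputs and into the operator's parameter (CheckMapsParameters = flags
plus a ZoneHandleSet<Map>), so every CheckMaps node now has exactly one value
input, and identical checks can value-number together via the parameter
equality and hashing defined above. A standalone sketch of that identity
scheme, with std:: types standing in for ZoneHandleSet and base::hash_combine:

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <vector>

    struct CheckMapsParams {
      unsigned flags;                 // e.g. the kTryMigrateInstance bit
      std::vector<const void*> maps;  // stands in for the handle set
      bool operator==(const CheckMapsParams& o) const {
        return flags == o.flags && maps == o.maps;
      }
    };

    size_t Hash(const CheckMapsParams& p) {
      size_t h = p.flags;
      for (const void* m : p.maps) h = h * 31 + std::hash<const void*>()(m);
      return h;
    }

    int main() {
      int map_a, map_b;
      CheckMapsParams p1{0, {&map_a, &map_b}};
      CheckMapsParams p2{0, {&map_a, &map_b}};
      assert(p1 == p2 && Hash(p1) == Hash(p2));  // same check, same operator
      return 0;
    }
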
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
index 833a055..ff3f60a 100644
--- a/src/compiler/simplified-operator.h
+++ b/src/compiler/simplified-operator.h
@@ -14,6 +14,7 @@
 #include "src/handles.h"
 #include "src/machine-type.h"
 #include "src/objects.h"
+#include "src/zone/zone-handle-set.h"
 
 namespace v8 {
 namespace internal {
@@ -64,6 +65,7 @@
   BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
   int offset;                     // offset of the field, without tag.
   MaybeHandle<Name> name;         // debugging only.
+  MaybeHandle<Map> map;           // map of the field value (if known).
   Type* type;                     // type of the field.
   MachineType machine_type;       // machine type of the field.
   WriteBarrierKind write_barrier_kind;  // write barrier hint.
@@ -143,6 +145,41 @@
 
 CheckForMinusZeroMode CheckMinusZeroModeOf(const Operator*) WARN_UNUSED_RESULT;
 
+// Flags for map checks.
+enum class CheckMapsFlag : uint8_t {
+  kNone = 0u,
+  kTryMigrateInstance = 1u << 0,  // Try instance migration.
+};
+typedef base::Flags<CheckMapsFlag> CheckMapsFlags;
+
+DEFINE_OPERATORS_FOR_FLAGS(CheckMapsFlags)
+
+std::ostream& operator<<(std::ostream&, CheckMapsFlags);
+
+// A descriptor for map checks.
+class CheckMapsParameters final {
+ public:
+  CheckMapsParameters(CheckMapsFlags flags, ZoneHandleSet<Map> const& maps)
+      : flags_(flags), maps_(maps) {}
+
+  CheckMapsFlags flags() const { return flags_; }
+  ZoneHandleSet<Map> const& maps() const { return maps_; }
+
+ private:
+  CheckMapsFlags const flags_;
+  ZoneHandleSet<Map> const maps_;
+};
+
+bool operator==(CheckMapsParameters const&, CheckMapsParameters const&);
+bool operator!=(CheckMapsParameters const&, CheckMapsParameters const&);
+
+size_t hash_value(CheckMapsParameters const&);
+
+std::ostream& operator<<(std::ostream&, CheckMapsParameters const&);
+
+CheckMapsParameters const& CheckMapsParametersOf(Operator const*)
+    WARN_UNUSED_RESULT;
+
 // A descriptor for growing elements backing stores.
 enum class GrowFastElementsFlag : uint8_t {
   kNone = 0u,
@@ -160,16 +197,35 @@
     WARN_UNUSED_RESULT;
 
 // A descriptor for elements kind transitions.
-enum class ElementsTransition : uint8_t {
-  kFastTransition,  // simple transition, just updating the map.
-  kSlowTransition   // full transition, round-trip to the runtime.
+class ElementsTransition final {
+ public:
+  enum Mode : uint8_t {
+    kFastTransition,  // simple transition, just updating the map.
+    kSlowTransition   // full transition, round-trip to the runtime.
+  };
+
+  ElementsTransition(Mode mode, Handle<Map> source, Handle<Map> target)
+      : mode_(mode), source_(source), target_(target) {}
+
+  Mode mode() const { return mode_; }
+  Handle<Map> source() const { return source_; }
+  Handle<Map> target() const { return target_; }
+
+ private:
+  Mode const mode_;
+  Handle<Map> const source_;
+  Handle<Map> const target_;
 };
 
+bool operator==(ElementsTransition const&, ElementsTransition const&);
+bool operator!=(ElementsTransition const&, ElementsTransition const&);
+
 size_t hash_value(ElementsTransition);
 
 std::ostream& operator<<(std::ostream&, ElementsTransition);
 
-ElementsTransition ElementsTransitionOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementsTransition const& ElementsTransitionOf(const Operator* op)
+    WARN_UNUSED_RESULT;
 
 // A hint for speculative number operations.
 enum class NumberOperationHint : uint8_t {
@@ -186,6 +242,8 @@
 NumberOperationHint NumberOperationHintOf(const Operator* op)
     WARN_UNUSED_RESULT;
 
+int ParameterCountOf(const Operator* op) WARN_UNUSED_RESULT;
+
 PretenureFlag PretenureFlagOf(const Operator* op) WARN_UNUSED_RESULT;
 
 UnicodeEncoding UnicodeEncodingOf(const Operator*) WARN_UNUSED_RESULT;
@@ -294,9 +352,11 @@
   const Operator* StringEqual();
   const Operator* StringLessThan();
   const Operator* StringLessThanOrEqual();
+  const Operator* StringCharAt();
   const Operator* StringCharCodeAt();
   const Operator* StringFromCharCode();
   const Operator* StringFromCodePoint(UnicodeEncoding encoding);
+  const Operator* StringIndexOf();
 
   const Operator* PlainPrimitiveToNumber();
   const Operator* PlainPrimitiveToWord32();
@@ -306,6 +366,7 @@
   const Operator* ChangeTaggedToInt32();
   const Operator* ChangeTaggedToUint32();
   const Operator* ChangeTaggedToFloat64();
+  const Operator* ChangeTaggedToTaggedSigned();
   const Operator* ChangeInt31ToTaggedSigned();
   const Operator* ChangeInt32ToTagged();
   const Operator* ChangeUint32ToTagged();
@@ -319,12 +380,14 @@
 
   const Operator* CheckIf();
   const Operator* CheckBounds();
-  const Operator* CheckMaps(int map_input_count);
+  const Operator* CheckMaps(CheckMapsFlags, ZoneHandleSet<Map>);
 
   const Operator* CheckHeapObject();
+  const Operator* CheckInternalizedString();
   const Operator* CheckNumber();
   const Operator* CheckSmi();
   const Operator* CheckString();
+  const Operator* CheckReceiver();
 
   const Operator* CheckedInt32Add();
   const Operator* CheckedInt32Sub();
@@ -348,13 +411,20 @@
   const Operator* CheckTaggedHole();
   const Operator* ConvertTaggedHoleToUndefined();
 
-  const Operator* ObjectIsCallable();
+  const Operator* ObjectIsDetectableCallable();
+  const Operator* ObjectIsNonCallable();
   const Operator* ObjectIsNumber();
   const Operator* ObjectIsReceiver();
   const Operator* ObjectIsSmi();
   const Operator* ObjectIsString();
   const Operator* ObjectIsUndetectable();
 
+  // new-rest-parameter-elements
+  const Operator* NewRestParameterElements(int parameter_count);
+
+  // new-unmapped-arguments-elements
+  const Operator* NewUnmappedArgumentsElements(int parameter_count);
+
   // array-buffer-was-neutered buffer
   const Operator* ArrayBufferWasNeutered();
 
diff --git a/src/compiler/state-values-utils.cc b/src/compiler/state-values-utils.cc
index e8310d7..899c91a 100644
--- a/src/compiler/state-values-utils.cc
+++ b/src/compiler/state-values-utils.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/state-values-utils.h"
 
+#include "src/bit-vector.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
@@ -47,6 +49,16 @@
   if (key->count != static_cast<size_t>(node->InputCount())) {
     return false;
   }
+
+  DCHECK(node->opcode() == IrOpcode::kStateValues);
+  SparseInputMask node_mask = SparseInputMaskOf(node->op());
+
+  if (node_mask != key->mask) {
+    return false;
+  }
+
+  // Comparing real inputs rather than sparse inputs, since we already know the
+  // sparse input masks are the same.
   for (size_t i = 0; i < key->count; i++) {
     if (key->values[i] != node->InputAt(static_cast<int>(i))) {
       return false;
@@ -62,6 +74,9 @@
   if (key1->count != key2->count) {
     return false;
   }
+  if (key1->mask != key2->mask) {
+    return false;
+  }
   for (size_t i = 0; i < key1->count; i++) {
     if (key1->values[i] != key2->values[i]) {
       return false;
@@ -73,19 +88,18 @@
 
 Node* StateValuesCache::GetEmptyStateValues() {
   if (empty_state_values_ == nullptr) {
-    empty_state_values_ = graph()->NewNode(common()->StateValues(0));
+    empty_state_values_ =
+        graph()->NewNode(common()->StateValues(0, SparseInputMask::Dense()));
   }
   return empty_state_values_;
 }
 
-
-NodeVector* StateValuesCache::GetWorkingSpace(size_t level) {
-  while (working_space_.size() <= level) {
-    void* space = zone()->New(sizeof(NodeVector));
-    working_space_.push_back(new (space)
-                                 NodeVector(kMaxInputCount, nullptr, zone()));
+StateValuesCache::WorkingBuffer* StateValuesCache::GetWorkingSpace(
+    size_t level) {
+  if (working_space_.size() <= level) {
+    working_space_.resize(level + 1);
   }
-  return working_space_[level];
+  return &working_space_[level];
 }
 
 namespace {
@@ -93,24 +107,24 @@
 int StateValuesHashKey(Node** nodes, size_t count) {
   size_t hash = count;
   for (size_t i = 0; i < count; i++) {
-    hash = hash * 23 + nodes[i]->id();
+    hash = hash * 23 + (nodes[i] == nullptr ? 0 : nodes[i]->id());
   }
   return static_cast<int>(hash & 0x7fffffff);
 }
 
 }  // namespace
 
-
-Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count) {
-  StateValuesKey key(count, nodes);
+Node* StateValuesCache::GetValuesNodeFromCache(Node** nodes, size_t count,
+                                               SparseInputMask mask) {
+  StateValuesKey key(count, mask, nodes);
   int hash = StateValuesHashKey(nodes, count);
   ZoneHashMap::Entry* lookup =
       hash_map_.LookupOrInsert(&key, hash, ZoneAllocationPolicy(zone()));
   DCHECK_NOT_NULL(lookup);
   Node* node;
   if (lookup->value == nullptr) {
-    int input_count = static_cast<int>(count);
-    node = graph()->NewNode(common()->StateValues(input_count), input_count,
+    int node_count = static_cast<int>(count);
+    node = graph()->NewNode(common()->StateValues(node_count, mask), node_count,
                             nodes);
     NodeKey* new_key = new (zone()->New(sizeof(NodeKey))) NodeKey(node);
     lookup->key = new_key;
@@ -121,106 +135,192 @@
   return node;
 }
 
+SparseInputMask::BitMaskType StateValuesCache::FillBufferWithValues(
+    WorkingBuffer* node_buffer, size_t* node_count, size_t* values_idx,
+    Node** values, size_t count, const BitVector* liveness,
+    int liveness_offset) {
+  SparseInputMask::BitMaskType input_mask = 0;
 
-class StateValuesCache::ValueArrayIterator {
- public:
-  ValueArrayIterator(Node** values, size_t count)
-      : values_(values), count_(count), current_(0) {}
+  // Virtual nodes are the live nodes plus the optimized-out nodes that are
+  // implied by the zero bits in the liveness mask.
+  size_t virtual_node_count = *node_count;
 
-  void Advance() {
-    if (!done()) {
-      current_++;
+  while (*values_idx < count && *node_count < kMaxInputCount &&
+         virtual_node_count < SparseInputMask::kMaxSparseInputs) {
+    DCHECK_LE(*values_idx, static_cast<size_t>(INT_MAX));
+
+    if (liveness == nullptr ||
+        liveness->Contains(liveness_offset + static_cast<int>(*values_idx))) {
+      input_mask |= 1 << (virtual_node_count);
+      (*node_buffer)[(*node_count)++] = values[*values_idx];
+    }
+    virtual_node_count++;
+
+    (*values_idx)++;
+  }
+
+  DCHECK(*node_count <= StateValuesCache::kMaxInputCount);
+  DCHECK(virtual_node_count <= SparseInputMask::kMaxSparseInputs);
+
+  // Add the end marker at the end of the mask.
+  input_mask |= SparseInputMask::kEndMarker << virtual_node_count;
+
+  return input_mask;
+}
+
+Node* StateValuesCache::BuildTree(size_t* values_idx, Node** values,
+                                  size_t count, const BitVector* liveness,
+                                  int liveness_offset, size_t level) {
+  WorkingBuffer* node_buffer = GetWorkingSpace(level);
+  size_t node_count = 0;
+  SparseInputMask::BitMaskType input_mask = SparseInputMask::kDenseBitMask;
+
+  if (level == 0) {
+    input_mask = FillBufferWithValues(node_buffer, &node_count, values_idx,
+                                      values, count, liveness, liveness_offset);
+    // Make sure we returned a sparse input mask.
+    DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+  } else {
+    while (*values_idx < count && node_count < kMaxInputCount) {
+      if (count - *values_idx < kMaxInputCount - node_count) {
+        // If we have fewer values remaining than inputs remaining, dump the
+        // remaining values into this node.
+        // TODO(leszeks): We could optimise this further by only counting
+        // remaining live nodes.
+
+        size_t previous_input_count = node_count;
+        input_mask =
+            FillBufferWithValues(node_buffer, &node_count, values_idx, values,
+                                 count, liveness, liveness_offset);
+        // Make sure we have exhausted our values.
+        DCHECK_EQ(*values_idx, count);
+        // Make sure we returned a sparse input mask.
+        DCHECK_NE(input_mask, SparseInputMask::kDenseBitMask);
+
+        // Make sure we haven't touched inputs below previous_input_count in the
+        // mask.
+        DCHECK_EQ(input_mask & ((1 << previous_input_count) - 1), 0u);
+        // Mark all previous inputs as live.
+        input_mask |= ((1 << previous_input_count) - 1);
+
+        break;
+
+      } else {
+        // Otherwise, add the values to a subtree and add that as an input.
+        Node* subtree = BuildTree(values_idx, values, count, liveness,
+                                  liveness_offset, level - 1);
+        (*node_buffer)[node_count++] = subtree;
+        // Don't touch the bitmask, so that it stays dense.
+      }
     }
   }
 
-  bool done() { return current_ >= count_; }
-
-  Node* node() {
-    DCHECK(!done());
-    return values_[current_];
-  }
-
- private:
-  Node** values_;
-  size_t count_;
-  size_t current_;
-};
-
-
-Node* StateValuesCache::BuildTree(ValueArrayIterator* it, size_t max_height) {
-  if (max_height == 0) {
-    Node* node = it->node();
-    it->Advance();
-    return node;
-  }
-  DCHECK(!it->done());
-
-  NodeVector* buffer = GetWorkingSpace(max_height);
-  size_t count = 0;
-  for (; count < kMaxInputCount; count++) {
-    if (it->done()) break;
-    (*buffer)[count] = BuildTree(it, max_height - 1);
-  }
-  if (count == 1) {
-    return (*buffer)[0];
+  if (node_count == 1 && input_mask == SparseInputMask::kDenseBitMask) {
+    // Elide the StateValues node if there is only a single, dense input. This
+    // only happen if we built a single subtree (as nodes with values are always
+    // sparse), and so we can replace ourselves with it.
+    DCHECK_EQ((*node_buffer)[0]->opcode(), IrOpcode::kStateValues);
+    return (*node_buffer)[0];
   } else {
-    return GetValuesNodeFromCache(&(buffer->front()), count);
+    return GetValuesNodeFromCache(node_buffer->data(), node_count,
+                                  SparseInputMask(input_mask));
   }
 }
 
-
-Node* StateValuesCache::GetNodeForValues(Node** values, size_t count) {
 #if DEBUG
+namespace {
+
+void CheckTreeContainsValues(Node* tree, Node** values, size_t count,
+                             const BitVector* liveness, int liveness_offset) {
+  CHECK_EQ(count, StateValuesAccess(tree).size());
+
+  int i;
+  auto access = StateValuesAccess(tree);
+  auto it = access.begin();
+  auto itend = access.end();
+  for (i = 0; it != itend; ++it, ++i) {
+    if (liveness == nullptr || liveness->Contains(liveness_offset + i)) {
+      CHECK((*it).node == values[i]);
+    } else {
+      CHECK((*it).node == nullptr);
+    }
+  }
+  CHECK_EQ(static_cast<size_t>(i), count);
+}
+
+}  // namespace
+#endif
+
+Node* StateValuesCache::GetNodeForValues(Node** values, size_t count,
+                                         const BitVector* liveness,
+                                         int liveness_offset) {
+#if DEBUG
+  // Check that the values represent actual values, and not a tree of values.
   for (size_t i = 0; i < count; i++) {
-    DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
-    DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+    if (values[i] != nullptr) {
+      DCHECK_NE(values[i]->opcode(), IrOpcode::kStateValues);
+      DCHECK_NE(values[i]->opcode(), IrOpcode::kTypedStateValues);
+    }
+  }
+  if (liveness != nullptr) {
+    DCHECK_LE(liveness_offset + count, static_cast<size_t>(liveness->length()));
+
+    for (size_t i = 0; i < count; i++) {
+      if (liveness->Contains(liveness_offset + static_cast<int>(i))) {
+        DCHECK_NOT_NULL(values[i]);
+      }
+    }
   }
 #endif
+
   if (count == 0) {
     return GetEmptyStateValues();
   }
+
+  // This is a worst-case tree height estimate, assuming that all values are
+  // live. We could get a better estimate by counting zeroes in the liveness
+  // vector, but there's no point -- any excess height in the tree will be
+  // collapsed by the single-input elision at the end of BuildTree.
   size_t height = 0;
-  size_t max_nodes = 1;
-  while (count > max_nodes) {
+  size_t max_inputs = kMaxInputCount;
+  while (count > max_inputs) {
     height++;
-    max_nodes *= kMaxInputCount;
+    max_inputs *= kMaxInputCount;
   }
 
-  ValueArrayIterator it(values, count);
+  size_t values_idx = 0;
+  Node* tree =
+      BuildTree(&values_idx, values, count, liveness, liveness_offset, height);
+  // The values should be exhausted by the end of BuildTree.
+  DCHECK_EQ(values_idx, count);
 
-  Node* tree = BuildTree(&it, height);
+  // The 'tree' must be rooted with a state value node.
+  DCHECK_EQ(tree->opcode(), IrOpcode::kStateValues);
 
-  // If the 'tree' is a single node, equip it with a StateValues wrapper.
-  if (tree->opcode() != IrOpcode::kStateValues &&
-      tree->opcode() != IrOpcode::kTypedStateValues) {
-    tree = GetValuesNodeFromCache(&tree, 1);
-  }
+#if DEBUG
+  CheckTreeContainsValues(tree, values, count, liveness, liveness_offset);
+#endif
 
   return tree;
 }
 
-
 StateValuesAccess::iterator::iterator(Node* node) : current_depth_(0) {
-  // A hacky way initialize - just set the index before the node we want
-  // to process and then advance to it.
-  stack_[current_depth_].node = node;
-  stack_[current_depth_].index = -1;
-  Advance();
+  stack_[current_depth_] =
+      SparseInputMaskOf(node->op()).IterateOverInputs(node);
+  EnsureValid();
 }
 
-
-StateValuesAccess::iterator::StatePos* StateValuesAccess::iterator::Top() {
+SparseInputMask::InputIterator* StateValuesAccess::iterator::Top() {
   DCHECK(current_depth_ >= 0);
   DCHECK(current_depth_ < kMaxInlineDepth);
   return &(stack_[current_depth_]);
 }
 
-
 void StateValuesAccess::iterator::Push(Node* node) {
   current_depth_++;
   CHECK(current_depth_ < kMaxInlineDepth);
-  stack_[current_depth_].node = node;
-  stack_[current_depth_].index = 0;
+  stack_[current_depth_] =
+      SparseInputMaskOf(node->op()).IterateOverInputs(node);
 }
 
 
@@ -234,48 +334,61 @@
 
 
 void StateValuesAccess::iterator::Advance() {
-  // Advance the current index.
-  Top()->index++;
+  Top()->Advance();
+  EnsureValid();
+}
 
-  // Fix up the position to point to a valid node.
+void StateValuesAccess::iterator::EnsureValid() {
   while (true) {
-    // TODO(jarin): Factor to a separate method.
-    Node* node = Top()->node;
-    int index = Top()->index;
+    SparseInputMask::InputIterator* top = Top();
 
-    if (index >= node->InputCount()) {
-      // Pop stack and move to the next sibling.
+    if (top->IsEmpty()) {
+      // We are on a valid (albeit optimized out) node.
+      return;
+    }
+
+    if (top->IsEnd()) {
+      // We have hit the end of this iterator. Pop the stack and move to the
+      // next sibling iterator.
       Pop();
       if (done()) {
         // Stack is exhausted, we have reached the end.
         return;
       }
-      Top()->index++;
-    } else if (node->InputAt(index)->opcode() == IrOpcode::kStateValues ||
-               node->InputAt(index)->opcode() == IrOpcode::kTypedStateValues) {
-      // Nested state, we need to push to the stack.
-      Push(node->InputAt(index));
-    } else {
-      // We are on a valid node, we can stop the iteration.
-      return;
+      Top()->Advance();
+      continue;
     }
+
+    // At this point the value is known to be live and within our input nodes.
+    Node* value_node = top->GetReal();
+
+    if (value_node->opcode() == IrOpcode::kStateValues ||
+        value_node->opcode() == IrOpcode::kTypedStateValues) {
+      // Nested state, we need to push to the stack.
+      Push(value_node);
+      continue;
+    }
+
+    // We are on a valid node, we can stop the iteration.
+    return;
   }
 }
 
-
-Node* StateValuesAccess::iterator::node() {
-  return Top()->node->InputAt(Top()->index);
-}
-
+Node* StateValuesAccess::iterator::node() { return Top()->Get(nullptr); }
 
 MachineType StateValuesAccess::iterator::type() {
-  Node* state = Top()->node;
-  if (state->opcode() == IrOpcode::kStateValues) {
+  Node* parent = Top()->parent();
+  if (parent->opcode() == IrOpcode::kStateValues) {
     return MachineType::AnyTagged();
   } else {
-    DCHECK_EQ(IrOpcode::kTypedStateValues, state->opcode());
-    ZoneVector<MachineType> const* types = MachineTypesOf(state->op());
-    return (*types)[Top()->index];
+    DCHECK_EQ(IrOpcode::kTypedStateValues, parent->opcode());
+
+    if (Top()->IsEmpty()) {
+      return MachineType::None();
+    } else {
+      ZoneVector<MachineType> const* types = MachineTypesOf(parent->op());
+      return (*types)[Top()->real_index()];
+    }
   }
 }
 
@@ -300,14 +413,24 @@
 
 size_t StateValuesAccess::size() {
   size_t count = 0;
-  for (int i = 0; i < node_->InputCount(); i++) {
-    if (node_->InputAt(i)->opcode() == IrOpcode::kStateValues ||
-        node_->InputAt(i)->opcode() == IrOpcode::kTypedStateValues) {
-      count += StateValuesAccess(node_->InputAt(i)).size();
-    } else {
+  SparseInputMask mask = SparseInputMaskOf(node_->op());
+
+  SparseInputMask::InputIterator iterator = mask.IterateOverInputs(node_);
+
+  for (; !iterator.IsEnd(); iterator.Advance()) {
+    if (iterator.IsEmpty()) {
       count++;
+    } else {
+      Node* value = iterator.GetReal();
+      if (value->opcode() == IrOpcode::kStateValues ||
+          value->opcode() == IrOpcode::kTypedStateValues) {
+        count += StateValuesAccess(value).size();
+      } else {
+        count++;
+      }
     }
   }
+
   return count;
 }
 
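The core of this rewrite is the sparse encoding: a StateValues node no longer
carries one input per frame-state slot. Dead slots are dropped, and a per-node
bitmask records which "virtual" slots have a real input (1) and which were
optimized out (0), terminated by an end-marker bit above the last virtual
slot, as in FillBufferWithValues above. A standalone sketch of encoding plus
iteration under that bit layout (this is not V8's SparseInputMask class, just
the idea):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    using BitMask = uint32_t;
    const BitMask kEndMarker = 1;

    // Encode: keep only live values, set their bits, append the end marker.
    BitMask Encode(const std::vector<int*>& values,
                   const std::vector<bool>& live, std::vector<int*>* inputs) {
      BitMask mask = 0;
      size_t virtual_count = 0;
      for (size_t i = 0; i < values.size(); i++, virtual_count++) {
        if (live[i]) {
          mask |= BitMask{1} << virtual_count;
          inputs->push_back(values[i]);
        }
      }
      return mask | (kEndMarker << virtual_count);
    }

    int main() {
      int a = 1, c = 3;
      std::vector<int*> inputs;
      // Slots [a, <dead>, c] -> inputs [a, c], mask 0b1101 (end, c, dead, a).
      BitMask mask = Encode({&a, nullptr, &c}, {true, false, true}, &inputs);
      assert(inputs.size() == 2);
      assert(mask == 0b1101u);

      // Iterate: a zero bit yields an "empty" (optimized-out) slot, a one bit
      // consumes the next real input; stop once only the end marker remains.
      size_t real = 0, slots = 0;
      while (mask != kEndMarker) {
        if (mask & 1) real++;  // live slot, cf. InputIterator::GetReal()
        mask >>= 1;
        slots++;
      }
      assert(slots == 3 && real == 2);
      return 0;
    }
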
diff --git a/src/compiler/state-values-utils.h b/src/compiler/state-values-utils.h
index 14b1b9e..e1fd7d2 100644
--- a/src/compiler/state-values-utils.h
+++ b/src/compiler/state-values-utils.h
@@ -5,12 +5,16 @@
 #ifndef V8_COMPILER_STATE_VALUES_UTILS_H_
 #define V8_COMPILER_STATE_VALUES_UTILS_H_
 
+#include <array>
+#include "src/compiler/common-operator.h"
 #include "src/compiler/js-graph.h"
 #include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
+class BitVector;
+
 namespace compiler {
 
 class Graph;
@@ -19,10 +23,13 @@
  public:
   explicit StateValuesCache(JSGraph* js_graph);
 
-  Node* GetNodeForValues(Node** values, size_t count);
+  Node* GetNodeForValues(Node** values, size_t count,
+                         const BitVector* liveness = nullptr,
+                         int liveness_offset = 0);
 
  private:
   static const size_t kMaxInputCount = 8;
+  typedef std::array<Node*, kMaxInputCount> WorkingBuffer;
 
   struct NodeKey {
     Node* node;
@@ -33,22 +40,35 @@
   struct StateValuesKey : public NodeKey {
     // ValueArray - array of nodes ({node} has to be nullptr).
     size_t count;
+    SparseInputMask mask;
     Node** values;
 
-    StateValuesKey(size_t count, Node** values)
-        : NodeKey(nullptr), count(count), values(values) {}
+    StateValuesKey(size_t count, SparseInputMask mask, Node** values)
+        : NodeKey(nullptr), count(count), mask(mask), values(values) {}
   };
 
-  class ValueArrayIterator;
-
   static bool AreKeysEqual(void* key1, void* key2);
   static bool IsKeysEqualToNode(StateValuesKey* key, Node* node);
   static bool AreValueKeysEqual(StateValuesKey* key1, StateValuesKey* key2);
 
-  Node* BuildTree(ValueArrayIterator* it, size_t max_height);
-  NodeVector* GetWorkingSpace(size_t level);
+  // Fills {node_buffer}, starting from {node_count}, with {values}, starting
+  // at {values_idx}, sparsely encoding according to {liveness}. {node_count} is
+  // updated with the new number of inputs in {node_buffer}, and a bitmask of
+  // the sparse encoding is returned.
+  SparseInputMask::BitMaskType FillBufferWithValues(WorkingBuffer* node_buffer,
+                                                    size_t* node_count,
+                                                    size_t* values_idx,
+                                                    Node** values, size_t count,
+                                                    const BitVector* liveness,
+                                                    int liveness_offset);
+
+  Node* BuildTree(size_t* values_idx, Node** values, size_t count,
+                  const BitVector* liveness, int liveness_offset, size_t level);
+
+  WorkingBuffer* GetWorkingSpace(size_t level);
   Node* GetEmptyStateValues();
-  Node* GetValuesNodeFromCache(Node** nodes, size_t count);
+  Node* GetValuesNodeFromCache(Node** nodes, size_t count,
+                               SparseInputMask mask);
 
   Graph* graph() { return js_graph_->graph(); }
   CommonOperatorBuilder* common() { return js_graph_->common(); }
@@ -57,7 +77,7 @@
 
   JSGraph* js_graph_;
   CustomMatcherZoneHashMap hash_map_;
-  ZoneVector<NodeVector*> working_space_;  // One working space per level.
+  ZoneVector<WorkingBuffer> working_space_;  // One working space per level.
   Node* empty_state_values_;
 };
 
@@ -86,21 +106,14 @@
     MachineType type();
     bool done();
     void Advance();
+    void EnsureValid();
 
-    struct StatePos {
-      Node* node;
-      int index;
-
-      explicit StatePos(Node* node) : node(node), index(0) {}
-      StatePos() {}
-    };
-
-    StatePos* Top();
+    SparseInputMask::InputIterator* Top();
     void Push(Node* node);
     void Pop();
 
     static const int kMaxInlineDepth = 8;
-    StatePos stack_[kMaxInlineDepth];
+    SparseInputMask::InputIterator stack_[kMaxInlineDepth];
     int current_depth_;
   };
 
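For reference, the height computation in GetNodeForValues picks the smallest
tree that can hold {count} leaves with a fanout of kMaxInputCount (8): height
h suffices once 8^(h+1) >= count. A quick standalone check of that loop:

    #include <cassert>
    #include <cstddef>

    size_t TreeHeight(size_t count, size_t fanout = 8) {
      size_t height = 0, max_inputs = fanout;
      while (count > max_inputs) {
        height++;
        max_inputs *= fanout;
      }
      return height;
    }

    int main() {
      assert(TreeHeight(1) == 0);    // fits directly in one node
      assert(TreeHeight(8) == 0);
      assert(TreeHeight(9) == 1);    // needs one layer of subtrees
      assert(TreeHeight(100) == 2);  // 64 < 100 <= 512
      return 0;
    }
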
diff --git a/src/compiler/type-cache.h b/src/compiler/type-cache.h
index 69eaf11..3d9801b 100644
--- a/src/compiler/type-cache.h
+++ b/src/compiler/type-cache.h
@@ -64,6 +64,8 @@
   Type* const kPositiveInteger = CreateRange(0.0, V8_INFINITY);
   Type* const kPositiveIntegerOrMinusZero =
       Type::Union(kPositiveInteger, Type::MinusZero(), zone());
+  Type* const kPositiveIntegerOrNaN =
+      Type::Union(kPositiveInteger, Type::NaN(), zone());
   Type* const kPositiveIntegerOrMinusZeroOrNaN =
       Type::Union(kPositiveIntegerOrMinusZero, Type::NaN(), zone());
 
@@ -97,6 +99,11 @@
   // [0, String::kMaxLength].
   Type* const kStringLengthType = CreateRange(0.0, String::kMaxLength);
 
+  // A time value always contains a tagged number in the range
+  // [-kMaxTimeInMs, kMaxTimeInMs].
+  Type* const kTimeValueType =
+      CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs);
+
   // The JSDate::day property always contains a tagged number in the range
   // [1, 31] or NaN.
   Type* const kJSDateDayType =
@@ -123,9 +130,8 @@
 
   // The JSDate::value property always contains a tagged number in the range
   // [-kMaxTimeInMs, kMaxTimeInMs] or NaN.
-  Type* const kJSDateValueType = Type::Union(
-      CreateRange(-DateCache::kMaxTimeInMs, DateCache::kMaxTimeInMs),
-      Type::NaN(), zone());
+  Type* const kJSDateValueType =
+      Type::Union(kTimeValueType, Type::NaN(), zone());
 
   // The JSDate::weekday property always contains a tagged number in the range
   // [0, 6] or NaN.
@@ -137,6 +143,10 @@
   Type* const kJSDateYearType =
       Type::Union(Type::SignedSmall(), Type::NaN(), zone());
 
+  // The valid number of arguments for JavaScript functions.
+  Type* const kArgumentsLengthType =
+      Type::Range(0.0, Code::kMaxArguments, zone());
+
  private:
   template <typename T>
   Type* CreateRange() {
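
kTimeValueType is factored out so the Date.now() typing in typer.cc and the
JSDate value type share one definition. Assuming DateCache::kMaxTimeInMs
matches the ECMAScript bound of 8.64e15 ms (100,000,000 days on either side
of the epoch), the range works out as follows:

    #include <cassert>

    int main() {
      const double kMaxTimeInMs = 864e13;  // 100,000,000 days * 86,400,000 ms
      assert(kMaxTimeInMs == 100000000.0 * 86400000.0);
      // kTimeValueType = Range(-kMaxTimeInMs, kMaxTimeInMs); Date.now() always
      // yields a value in this range, while JSDate::value may also be NaN.
      return 0;
    }
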
diff --git a/src/compiler/type-hint-analyzer.cc b/src/compiler/type-hint-analyzer.cc
deleted file mode 100644
index da77a0c..0000000
--- a/src/compiler/type-hint-analyzer.cc
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/compiler/type-hint-analyzer.h"
-
-#include "src/assembler.h"
-#include "src/code-stubs.h"
-#include "src/ic/ic-state.h"
-#include "src/type-hints.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-namespace {
-
-BinaryOperationHint ToBinaryOperationHint(Token::Value op,
-                                          BinaryOpICState::Kind kind) {
-  switch (kind) {
-    case BinaryOpICState::NONE:
-      return BinaryOperationHint::kNone;
-    case BinaryOpICState::SMI:
-      return BinaryOperationHint::kSignedSmall;
-    case BinaryOpICState::INT32:
-      return (Token::IsTruncatingBinaryOp(op) && SmiValuesAre31Bits())
-                 ? BinaryOperationHint::kNumberOrOddball
-                 : BinaryOperationHint::kSigned32;
-    case BinaryOpICState::NUMBER:
-      return BinaryOperationHint::kNumberOrOddball;
-    case BinaryOpICState::STRING:
-      return BinaryOperationHint::kString;
-    case BinaryOpICState::GENERIC:
-      return BinaryOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return BinaryOperationHint::kNone;
-}
-
-CompareOperationHint ToCompareOperationHint(Token::Value op,
-                                            CompareICState::State state) {
-  switch (state) {
-    case CompareICState::UNINITIALIZED:
-      return CompareOperationHint::kNone;
-    case CompareICState::SMI:
-      return CompareOperationHint::kSignedSmall;
-    case CompareICState::NUMBER:
-      return Token::IsOrderedRelationalCompareOp(op)
-                 ? CompareOperationHint::kNumberOrOddball
-                 : CompareOperationHint::kNumber;
-    case CompareICState::STRING:
-    case CompareICState::INTERNALIZED_STRING:
-    case CompareICState::UNIQUE_NAME:
-    case CompareICState::RECEIVER:
-    case CompareICState::KNOWN_RECEIVER:
-    case CompareICState::BOOLEAN:
-    case CompareICState::GENERIC:
-      return CompareOperationHint::kAny;
-  }
-  UNREACHABLE();
-  return CompareOperationHint::kNone;
-}
-
-}  // namespace
-
-bool TypeHintAnalysis::GetBinaryOperationHint(TypeFeedbackId id,
-                                              BinaryOperationHint* hint) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
-  BinaryOpICState state(code->GetIsolate(), code->extra_ic_state());
-  *hint = ToBinaryOperationHint(state.op(), state.kind());
-  return true;
-}
-
-bool TypeHintAnalysis::GetCompareOperationHint(
-    TypeFeedbackId id, CompareOperationHint* hint) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::COMPARE_IC, code->kind());
-  CompareICStub stub(code->stub_key(), code->GetIsolate());
-  *hint = ToCompareOperationHint(stub.op(), stub.state());
-  return true;
-}
-
-bool TypeHintAnalysis::GetToBooleanHints(TypeFeedbackId id,
-                                         ToBooleanHints* hints) const {
-  auto i = infos_.find(id);
-  if (i == infos_.end()) return false;
-  Handle<Code> code = i->second;
-  DCHECK_EQ(Code::TO_BOOLEAN_IC, code->kind());
-  ToBooleanICStub stub(code->GetIsolate(), code->extra_ic_state());
-  *hints = stub.hints();
-  return true;
-}
-
-TypeHintAnalysis* TypeHintAnalyzer::Analyze(Handle<Code> code) {
-  DisallowHeapAllocation no_gc;
-  TypeHintAnalysis::Infos infos(zone());
-  Isolate* const isolate = code->GetIsolate();
-  int const mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET_WITH_ID);
-  for (RelocIterator it(*code, mask); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    Address target_address = rinfo->target_address();
-    Code* target = Code::GetCodeFromTargetAddress(target_address);
-    switch (target->kind()) {
-      case Code::BINARY_OP_IC:
-      case Code::COMPARE_IC:
-      case Code::TO_BOOLEAN_IC: {
-        // Add this feedback to the {infos}.
-        TypeFeedbackId id(static_cast<unsigned>(rinfo->data()));
-        infos.insert(std::make_pair(id, handle(target, isolate)));
-        break;
-      }
-      default:
-        // Ignore the remaining code objects.
-        break;
-    }
-  }
-  return new (zone()) TypeHintAnalysis(infos, zone());
-}
-
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
diff --git a/src/compiler/type-hint-analyzer.h b/src/compiler/type-hint-analyzer.h
deleted file mode 100644
index 354f894..0000000
--- a/src/compiler/type-hint-analyzer.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_TYPE_HINT_ANALYZER_H_
-#define V8_COMPILER_TYPE_HINT_ANALYZER_H_
-
-#include "src/handles.h"
-#include "src/type-hints.h"
-#include "src/zone/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// The result of analyzing type hints.
-class TypeHintAnalysis final : public ZoneObject {
- public:
-  typedef ZoneMap<TypeFeedbackId, Handle<Code>> Infos;
-
-  explicit TypeHintAnalysis(Infos const& infos, Zone* zone)
-      : infos_(infos), zone_(zone) {}
-
-  bool GetBinaryOperationHint(TypeFeedbackId id,
-                              BinaryOperationHint* hint) const;
-  bool GetCompareOperationHint(TypeFeedbackId id,
-                               CompareOperationHint* hint) const;
-  bool GetToBooleanHints(TypeFeedbackId id, ToBooleanHints* hints) const;
-
- private:
-  Zone* zone() const { return zone_; }
-
-  Infos const infos_;
-  Zone* zone_;
-};
-
-
-// The class that performs type hint analysis on the fullcodegen code object.
-class TypeHintAnalyzer final {
- public:
-  explicit TypeHintAnalyzer(Zone* zone) : zone_(zone) {}
-
-  TypeHintAnalysis* Analyze(Handle<Code> code);
-
- private:
-  Zone* zone() const { return zone_; }
-
-  Zone* const zone_;
-
-  DISALLOW_COPY_AND_ASSIGN(TypeHintAnalyzer);
-};
-
-}  // namespace compiler
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_COMPILER_TYPE_HINT_ANALYZER_H_
diff --git a/src/compiler/typed-optimization.cc b/src/compiler/typed-optimization.cc
index 5ebc390..e130a10 100644
--- a/src/compiler/typed-optimization.cc
+++ b/src/compiler/typed-optimization.cc
@@ -83,14 +83,17 @@
     case IrOpcode::kLoadField:
       return ReduceLoadField(node);
     case IrOpcode::kNumberCeil:
-    case IrOpcode::kNumberFloor:
     case IrOpcode::kNumberRound:
     case IrOpcode::kNumberTrunc:
       return ReduceNumberRoundop(node);
+    case IrOpcode::kNumberFloor:
+      return ReduceNumberFloor(node);
     case IrOpcode::kNumberToUint8Clamped:
       return ReduceNumberToUint8Clamped(node);
     case IrOpcode::kPhi:
       return ReducePhi(node);
+    case IrOpcode::kReferenceEqual:
+      return ReduceReferenceEqual(node);
     case IrOpcode::kSelect:
       return ReduceSelect(node);
     default:
@@ -185,6 +188,40 @@
   return NoChange();
 }
 
+Reduction TypedOptimization::ReduceNumberFloor(Node* node) {
+  Node* const input = NodeProperties::GetValueInput(node, 0);
+  Type* const input_type = NodeProperties::GetType(input);
+  if (input_type->Is(type_cache_.kIntegerOrMinusZeroOrNaN)) {
+    return Replace(input);
+  }
+  if (input_type->Is(Type::PlainNumber()) &&
+      input->opcode() == IrOpcode::kNumberDivide) {
+    Node* const lhs = NodeProperties::GetValueInput(input, 0);
+    Type* const lhs_type = NodeProperties::GetType(lhs);
+    Node* const rhs = NodeProperties::GetValueInput(input, 1);
+    Type* const rhs_type = NodeProperties::GetType(rhs);
+    if (lhs_type->Is(Type::Unsigned32()) && rhs_type->Is(Type::Unsigned32())) {
+      // We can replace
+      //
+      //   NumberFloor(NumberDivide(lhs: unsigned32,
+      //                            rhs: unsigned32)): plain-number
+      //
+      // with
+      //
+      //   NumberToUint32(NumberDivide(lhs, rhs))
+      //
+      // and just smash the type of the {lhs} on the {node},
+      // as the truncated result must be in the same range as
+      // {lhs} since {rhs} cannot be less than 1 (due to the
+      // plain-number type constraint on the {node}).
+      NodeProperties::ChangeOp(node, simplified()->NumberToUint32());
+      NodeProperties::SetType(node, lhs_type);
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
 Reduction TypedOptimization::ReduceNumberRoundop(Node* node) {
   Node* const input = NodeProperties::GetValueInput(node, 0);
   Type* const input_type = NodeProperties::GetType(input);
@@ -223,6 +260,18 @@
   return NoChange();
 }
 
+Reduction TypedOptimization::ReduceReferenceEqual(Node* node) {
+  DCHECK_EQ(IrOpcode::kReferenceEqual, node->opcode());
+  Node* const lhs = NodeProperties::GetValueInput(node, 0);
+  Node* const rhs = NodeProperties::GetValueInput(node, 1);
+  Type* const lhs_type = NodeProperties::GetType(lhs);
+  Type* const rhs_type = NodeProperties::GetType(rhs);
+  if (!lhs_type->Maybe(rhs_type)) {
+    return Replace(jsgraph()->FalseConstant());
+  }
+  return NoChange();
+}
+
 Reduction TypedOptimization::ReduceSelect(Node* node) {
   DCHECK_EQ(IrOpcode::kSelect, node->opcode());
   Node* const condition = NodeProperties::GetValueInput(node, 0);
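
The ReduceNumberFloor transformation above is worth seeing concretely: with
unsigned32 operands and a plain-number result, the divisor is effectively at
least 1 (see the comment in the hunk), so the quotient is non-negative and
truncating toward zero equals flooring. A standalone check of that
equivalence:

    #include <cassert>
    #include <cstdint>

    // floor(lhs / rhs) for unsigned32 operands equals ToUint32-style
    // truncation, since the quotient is non-negative.
    uint32_t FloorOfDivide(uint32_t lhs, uint32_t rhs) {
      double quotient = static_cast<double>(lhs) / rhs;  // NumberDivide
      return static_cast<uint32_t>(quotient);            // NumberToUint32
    }

    int main() {
      assert(FloorOfDivide(7u, 2u) == 3u);                    // floor(3.5)
      assert(FloorOfDivide(0xffffffffu, 3u) == 0x55555555u);  // exact
      return 0;
    }

ReduceReferenceEqual in the same hunk follows a similar type-driven pattern:
when the two input types cannot intersect, the comparison folds to false
without inspecting the values.
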
diff --git a/src/compiler/typed-optimization.h b/src/compiler/typed-optimization.h
index fb2db72..93de680 100644
--- a/src/compiler/typed-optimization.h
+++ b/src/compiler/typed-optimization.h
@@ -46,9 +46,11 @@
   Reduction ReduceCheckMaps(Node* node);
   Reduction ReduceCheckString(Node* node);
   Reduction ReduceLoadField(Node* node);
+  Reduction ReduceNumberFloor(Node* node);
   Reduction ReduceNumberRoundop(Node* node);
   Reduction ReduceNumberToUint8Clamped(Node* node);
   Reduction ReducePhi(Node* node);
+  Reduction ReduceReferenceEqual(Node* node);
   Reduction ReduceSelect(Node* node);
 
   CompilationDependencies* dependencies() const { return dependencies_; }
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 2642a10..ed1a04a 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -43,13 +43,14 @@
   Zone* zone = this->zone();
   Factory* const factory = isolate->factory();
 
-  singleton_false_ = Type::HeapConstant(factory->false_value(), zone);
-  singleton_true_ = Type::HeapConstant(factory->true_value(), zone);
-  singleton_the_hole_ = Type::HeapConstant(factory->the_hole_value(), zone);
+  singleton_empty_string_ = Type::HeapConstant(factory->empty_string(), zone);
+  singleton_false_ = operation_typer_.singleton_false();
+  singleton_true_ = operation_typer_.singleton_true();
   falsish_ = Type::Union(
       Type::Undetectable(),
       Type::Union(Type::Union(singleton_false_, cache_.kZeroish, zone),
-                  singleton_the_hole_, zone),
+                  Type::Union(singleton_empty_string_, Type::Hole(), zone),
+                  zone),
       zone);
   truish_ = Type::Union(
       singleton_true_,
@@ -122,6 +123,8 @@
       DECLARE_CASE(Deoptimize)
       DECLARE_CASE(DeoptimizeIf)
       DECLARE_CASE(DeoptimizeUnless)
+      DECLARE_CASE(TrapIf)
+      DECLARE_CASE(TrapUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -185,6 +188,8 @@
       DECLARE_CASE(Deoptimize)
       DECLARE_CASE(DeoptimizeIf)
       DECLARE_CASE(DeoptimizeUnless)
+      DECLARE_CASE(TrapIf)
+      DECLARE_CASE(TrapUnless)
       DECLARE_CASE(Return)
       DECLARE_CASE(TailCall)
       DECLARE_CASE(Terminate)
@@ -279,7 +284,8 @@
   SIMPLIFIED_SPECULATIVE_NUMBER_BINOP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  static Type* ObjectIsCallable(Type*, Typer*);
+  static Type* ObjectIsDetectableCallable(Type*, Typer*);
+  static Type* ObjectIsNonCallable(Type*, Typer*);
   static Type* ObjectIsNumber(Type*, Typer*);
   static Type* ObjectIsReceiver(Type*, Typer*);
   static Type* ObjectIsSmi(Type*, Typer*);
@@ -292,7 +298,7 @@
   JS_SIMPLE_BINOP_LIST(DECLARE_METHOD)
 #undef DECLARE_METHOD
 
-  static Type* JSCallFunctionTyper(Type*, Typer*);
+  static Type* JSCallTyper(Type*, Typer*);
 
   static Type* ReferenceEqualTyper(Type*, Type*, Typer*);
   static Type* StringFromCharCodeTyper(Type*, Typer*);
@@ -497,9 +503,15 @@
 
 // Type checks.
 
-Type* Typer::Visitor::ObjectIsCallable(Type* type, Typer* t) {
-  if (type->Is(Type::Function())) return t->singleton_true_;
-  if (type->Is(Type::Primitive())) return t->singleton_false_;
+Type* Typer::Visitor::ObjectIsDetectableCallable(Type* type, Typer* t) {
+  if (type->Is(Type::DetectableCallable())) return t->singleton_true_;
+  if (!type->Maybe(Type::DetectableCallable())) return t->singleton_false_;
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::ObjectIsNonCallable(Type* type, Typer* t) {
+  if (type->Is(Type::NonCallable())) return t->singleton_true_;
+  if (!type->Maybe(Type::NonCallable())) return t->singleton_false_;
   return Type::Boolean();
 }
 
@@ -822,6 +834,10 @@
   return Type::Internal();
 }
 
+Type* Typer::Visitor::TypeArgumentsObjectState(Node* node) {
+  return Type::Internal();
+}
+
 Type* Typer::Visitor::TypeObjectState(Node* node) { return Type::Internal(); }
 
 Type* Typer::Visitor::TypeTypedObjectState(Node* node) {
@@ -893,8 +909,7 @@
       (lhs->Max() < rhs->Min() || lhs->Min() > rhs->Max())) {
     return t->singleton_false_;
   }
-  if ((lhs->Is(t->singleton_the_hole_) || rhs->Is(t->singleton_the_hole_)) &&
-      !lhs->Maybe(rhs)) {
+  if ((lhs->Is(Type::Hole()) || rhs->Is(Type::Hole())) && !lhs->Maybe(rhs)) {
     return t->singleton_false_;
   }
   if (lhs->IsHeapConstant() && rhs->Is(lhs)) {
@@ -1041,6 +1056,9 @@
 
 // JS unary operators.
 
+Type* Typer::Visitor::TypeJSClassOf(Node* node) {
+  return Type::InternalizedStringOrNull();
+}
 
 Type* Typer::Visitor::TypeJSTypeOf(Node* node) {
   return Type::InternalizedString();
@@ -1233,6 +1251,15 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeJSStoreNamedOwn(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
+
+Type* Typer::Visitor::TypeJSStoreDataPropertyInLiteral(Node* node) {
+  UNREACHABLE();
+  return nullptr;
+}
 
 Type* Typer::Visitor::TypeJSDeleteProperty(Node* node) {
   return Type::Boolean();
@@ -1240,12 +1267,21 @@
 
 Type* Typer::Visitor::TypeJSHasProperty(Node* node) { return Type::Boolean(); }
 
-Type* Typer::Visitor::TypeJSInstanceOf(Node* node) { return Type::Boolean(); }
+// JS instanceof operator.
 
-Type* Typer::Visitor::TypeJSOrdinaryHasInstance(Node* node) {
+Type* Typer::Visitor::JSInstanceOfTyper(Type* lhs, Type* rhs, Typer* t) {
   return Type::Boolean();
 }
 
+Type* Typer::Visitor::JSOrdinaryHasInstanceTyper(Type* lhs, Type* rhs,
+                                                 Typer* t) {
+  return Type::Boolean();
+}
+
+Type* Typer::Visitor::TypeJSGetSuperConstructor(Node* node) {
+  return Type::Callable();
+}
+
 // JS context operators.
 
 
@@ -1291,12 +1327,13 @@
 
 // JS other operators.
 
+Type* Typer::Visitor::TypeJSConstruct(Node* node) { return Type::Receiver(); }
 
-Type* Typer::Visitor::TypeJSCallConstruct(Node* node) {
+Type* Typer::Visitor::TypeJSConstructWithSpread(Node* node) {
   return Type::Receiver();
 }
 
-Type* Typer::Visitor::JSCallFunctionTyper(Type* fun, Typer* t) {
+Type* Typer::Visitor::JSCallTyper(Type* fun, Typer* t) {
   if (fun->IsHeapConstant() && fun->AsHeapConstant()->Value()->IsJSFunction()) {
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(fun->AsHeapConstant()->Value());
@@ -1344,6 +1381,8 @@
         case kMathClz32:
           return t->cache_.kZeroToThirtyTwo;
         // Date functions.
+        case kDateNow:
+          return t->cache_.kTimeValueType;
         case kDateGetDate:
           return t->cache_.kJSDateDayType;
         case kDateGetDay:
@@ -1363,6 +1402,7 @@
           return t->cache_.kJSDateSecondType;
         case kDateGetTime:
           return t->cache_.kJSDateValueType;
+
         // Number functions.
         case kNumberIsFinite:
         case kNumberIsInteger:
@@ -1375,16 +1415,41 @@
           return t->cache_.kIntegerOrMinusZeroOrNaN;
         case kNumberToString:
           return Type::String();
+
         // String functions.
         case kStringCharCodeAt:
           return Type::Union(Type::Range(0, kMaxUInt16, t->zone()), Type::NaN(),
                              t->zone());
         case kStringCharAt:
+          return Type::String();
+        case kStringCodePointAt:
+          return Type::Union(Type::Range(0.0, String::kMaxCodePoint, t->zone()),
+                             Type::Undefined(), t->zone());
         case kStringConcat:
         case kStringFromCharCode:
+        case kStringFromCodePoint:
+          return Type::String();
+        case kStringIndexOf:
+        case kStringLastIndexOf:
+          return Type::Range(-1.0, String::kMaxLength - 1.0, t->zone());
+        case kStringEndsWith:
+        case kStringIncludes:
+          return Type::Boolean();
+        case kStringRaw:
+        case kStringRepeat:
+        case kStringSlice:
+          return Type::String();
+        case kStringStartsWith:
+          return Type::Boolean();
         case kStringSubstr:
+        case kStringSubstring:
         case kStringToLowerCase:
+        case kStringToString:
         case kStringToUpperCase:
+        case kStringTrim:
+        case kStringTrimLeft:
+        case kStringTrimRight:
+        case kStringValueOf:
           return Type::String();
 
         case kStringIterator:
@@ -1401,15 +1466,59 @@
           return Type::OtherObject();
 
         // Array functions.
+        case kArrayIsArray:
+          return Type::Boolean();
+        case kArrayConcat:
+          return Type::Receiver();
+        case kArrayEvery:
+          return Type::Boolean();
+        case kArrayFill:
+        case kArrayFilter:
+          return Type::Receiver();
+        case kArrayFindIndex:
+          return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayForEach:
+          return Type::Undefined();
+        case kArrayIncludes:
+          return Type::Boolean();
         case kArrayIndexOf:
+          return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayJoin:
+          return Type::String();
         case kArrayLastIndexOf:
           return Type::Range(-1, kMaxSafeInteger, t->zone());
+        case kArrayMap:
+          return Type::Receiver();
         case kArrayPush:
           return t->cache_.kPositiveSafeInteger;
+        case kArrayReverse:
+        case kArraySlice:
+          return Type::Receiver();
+        case kArraySome:
+          return Type::Boolean();
+        case kArraySplice:
+          return Type::Receiver();
+        case kArrayUnshift:
+          return t->cache_.kPositiveSafeInteger;
 
         // Object functions.
+        case kObjectAssign:
+        case kObjectCreate:
+          return Type::OtherObject();
         case kObjectHasOwnProperty:
           return Type::Boolean();
+        case kObjectToString:
+          return Type::String();
+
+        // RegExp functions.
+        case kRegExpCompile:
+          return Type::OtherObject();
+        case kRegExpExec:
+          return Type::Union(Type::OtherObject(), Type::Null(), t->zone());
+        case kRegExpTest:
+          return Type::Boolean();
+        case kRegExpToString:
+          return Type::String();
 
         // Function functions.
         case kFunctionHasInstance:
@@ -1426,6 +1535,46 @@
         case kGlobalIsFinite:
         case kGlobalIsNaN:
           return Type::Boolean();
+
+        // Map functions.
+        case kMapClear:
+        case kMapForEach:
+          return Type::Undefined();
+        case kMapDelete:
+        case kMapHas:
+          return Type::Boolean();
+        case kMapEntries:
+        case kMapKeys:
+        case kMapSet:
+        case kMapValues:
+          return Type::OtherObject();
+
+        // Set functions.
+        case kSetAdd:
+        case kSetEntries:
+        case kSetKeys:
+        case kSetValues:
+          return Type::OtherObject();
+        case kSetClear:
+        case kSetForEach:
+          return Type::Undefined();
+        case kSetDelete:
+        case kSetHas:
+          return Type::Boolean();
+
+        // WeakMap functions.
+        case kWeakMapDelete:
+        case kWeakMapHas:
+          return Type::Boolean();
+        case kWeakMapSet:
+          return Type::OtherObject();
+
+        // WeakSet functions.
+        case kWeakSetAdd:
+          return Type::OtherObject();
+        case kWeakSetDelete:
+        case kWeakSetHas:
+          return Type::Boolean();
         default:
           break;
       }
@@ -1434,13 +1583,19 @@
   return Type::NonInternal();
 }
 
-
-Type* Typer::Visitor::TypeJSCallFunction(Node* node) {
-  // TODO(bmeurer): We could infer better types if we wouldn't ignore the
-  // argument types for the JSCallFunctionTyper above.
-  return TypeUnaryOp(node, JSCallFunctionTyper);
+Type* Typer::Visitor::TypeJSCallForwardVarargs(Node* node) {
+  return TypeUnaryOp(node, JSCallTyper);
 }
 
+Type* Typer::Visitor::TypeJSCall(Node* node) {
+  // TODO(bmeurer): We could infer better types if we wouldn't ignore the
+  // argument types for the JSCallTyper above.
+  return TypeUnaryOp(node, JSCallTyper);
+}
+
+Type* Typer::Visitor::TypeJSCallWithSpread(Node* node) {
+  return TypeUnaryOp(node, JSCallTyper);
+}
 
 Type* Typer::Visitor::TypeJSCallRuntime(Node* node) {
   switch (CallRuntimeParametersOf(node->op()).id()) {
@@ -1468,6 +1623,8 @@
       return TypeUnaryOp(node, ToObject);
     case Runtime::kInlineToString:
       return TypeUnaryOp(node, ToString);
+    case Runtime::kInlineClassOf:
+      return Type::InternalizedStringOrNull();
     case Runtime::kHasInPrototypeChain:
       return Type::Boolean();
     default:
@@ -1486,7 +1643,7 @@
 
 
 Type* Typer::Visitor::TypeJSForInNext(Node* node) {
-  return Type::Union(Type::Name(), Type::Undefined(), zone());
+  return Type::Union(Type::String(), Type::Undefined(), zone());
 }
 
 
@@ -1530,6 +1687,8 @@
 
 Type* Typer::Visitor::TypeJSStackCheck(Node* node) { return Type::Any(); }
 
+Type* Typer::Visitor::TypeJSDebugger(Node* node) { return Type::Any(); }
+
 // Simplified operators.
 
 Type* Typer::Visitor::TypeBooleanNot(Node* node) { return Type::Boolean(); }
@@ -1595,6 +1754,8 @@
   return Type::String();
 }
 
+Type* Typer::Visitor::TypeStringCharAt(Node* node) { return Type::String(); }
+
 Type* Typer::Visitor::TypeStringCharCodeAt(Node* node) {
   return typer_->cache_.kUint16;
 }
@@ -1607,6 +1768,10 @@
   return TypeUnaryOp(node, StringFromCodePointTyper);
 }
 
+Type* Typer::Visitor::TypeStringIndexOf(Node* node) {
+  return Type::Range(-1.0, String::kMaxLength - 1.0, zone());
+}
+
 Type* Typer::Visitor::TypeCheckBounds(Node* node) {
   Type* index = Operand(node, 0);
   Type* length = Operand(node, 1);
@@ -1628,6 +1793,11 @@
   return nullptr;
 }
 
+Type* Typer::Visitor::TypeCheckInternalizedString(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::InternalizedString(), zone());
+}
+
 Type* Typer::Visitor::TypeCheckMaps(Node* node) {
   UNREACHABLE();
   return nullptr;
@@ -1638,6 +1808,11 @@
   return Type::Intersect(arg, Type::Number(), zone());
 }
 
+Type* Typer::Visitor::TypeCheckReceiver(Node* node) {
+  Type* arg = Operand(node, 0);
+  return Type::Intersect(arg, Type::Receiver(), zone());
+}
+
 Type* Typer::Visitor::TypeCheckSmi(Node* node) {
   Type* arg = Operand(node, 0);
   return Type::Intersect(arg, Type::SignedSmall(), zone());
@@ -1726,8 +1901,12 @@
   return nullptr;
 }
 
-Type* Typer::Visitor::TypeObjectIsCallable(Node* node) {
-  return TypeUnaryOp(node, ObjectIsCallable);
+Type* Typer::Visitor::TypeObjectIsDetectableCallable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsDetectableCallable);
+}
+
+Type* Typer::Visitor::TypeObjectIsNonCallable(Node* node) {
+  return TypeUnaryOp(node, ObjectIsNonCallable);
 }
 
 Type* Typer::Visitor::TypeObjectIsNumber(Node* node) {
@@ -1752,6 +1931,14 @@
   return TypeUnaryOp(node, ObjectIsUndetectable);
 }
 
+Type* Typer::Visitor::TypeNewUnmappedArgumentsElements(Node* node) {
+  return Type::OtherInternal();
+}
+
+Type* Typer::Visitor::TypeNewRestParameterElements(Node* node) {
+  return Type::OtherInternal();
+}
+
 Type* Typer::Visitor::TypeArrayBufferWasNeutered(Node* node) {
   return Type::Boolean();
 }
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
index 7f6f90a..09b0b4d 100644
--- a/src/compiler/typer.h
+++ b/src/compiler/typer.h
@@ -50,9 +50,9 @@
   TypeCache const& cache_;
   OperationTyper operation_typer_;
 
+  Type* singleton_empty_string_;
   Type* singleton_false_;
   Type* singleton_true_;
-  Type* singleton_the_hole_;
   Type* falsish_;
   Type* truish_;
 
diff --git a/src/compiler/types.cc b/src/compiler/types.cc
index 806bd8f..f28a56a 100644
--- a/src/compiler/types.cc
+++ b/src/compiler/types.cc
@@ -7,6 +7,7 @@
 #include "src/compiler/types.h"
 
 #include "src/handles-inl.h"
+#include "src/objects-inl.h"
 #include "src/ostreams.h"
 
 namespace v8 {
@@ -151,6 +152,8 @@
     case ONE_BYTE_STRING_TYPE:
     case CONS_STRING_TYPE:
     case CONS_ONE_BYTE_STRING_TYPE:
+    case THIN_STRING_TYPE:
+    case THIN_ONE_BYTE_STRING_TYPE:
     case SLICED_STRING_TYPE:
     case SLICED_ONE_BYTE_STRING_TYPE:
     case EXTERNAL_STRING_TYPE:
@@ -187,8 +190,6 @@
     }
     case HEAP_NUMBER_TYPE:
       return kNumber;
-    case SIMD128_VALUE_TYPE:
-      return kSimd;
     case JS_OBJECT_TYPE:
     case JS_ARGUMENTS_TYPE:
     case JS_ERROR_TYPE:
@@ -196,7 +197,17 @@
     case JS_GLOBAL_PROXY_TYPE:
     case JS_API_OBJECT_TYPE:
     case JS_SPECIAL_API_OBJECT_TYPE:
-      if (map->is_undetectable()) return kOtherUndetectable;
+      if (map->is_undetectable()) {
+        // Currently we assume that every undetectable receiver is also
+        // callable, which is what we need to support document.all.  We
+        // could add another Type bit to support other use cases in the
+        // future if necessary.
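+        // For example, document.all is both undetectable and callable:
+        // typeof document.all evaluates to "undefined", yet it can still
+        // be invoked like a function.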
+        DCHECK(map->is_callable());
+        return kOtherUndetectable;
+      }
+      if (map->is_callable()) {
+        return kOtherCallable;
+      }
       return kOtherObject;
     case JS_VALUE_TYPE:
     case JS_MESSAGE_OBJECT_TYPE:
@@ -204,7 +215,6 @@
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_MODULE_NAMESPACE_TYPE:
-    case JS_FIXED_ARRAY_ITERATOR_TYPE:
     case JS_ARRAY_BUFFER_TYPE:
     case JS_ARRAY_TYPE:
     case JS_REGEXP_TYPE:  // TODO(rossberg): there should be a RegExp type.
@@ -215,6 +225,7 @@
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
     case JS_STRING_ITERATOR_TYPE:
+    case JS_ASYNC_FROM_SYNC_ITERATOR_TYPE:
 
     case JS_TYPED_ARRAY_KEY_ITERATOR_TYPE:
     case JS_FAST_ARRAY_KEY_ITERATOR_TYPE:
@@ -254,16 +265,21 @@
 
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
+    case JS_PROMISE_CAPABILITY_TYPE:
     case JS_PROMISE_TYPE:
-    case JS_BOUND_FUNCTION_TYPE:
+      DCHECK(!map->is_callable());
       DCHECK(!map->is_undetectable());
       return kOtherObject;
+    case JS_BOUND_FUNCTION_TYPE:
+      DCHECK(!map->is_undetectable());
+      return kBoundFunction;
     case JS_FUNCTION_TYPE:
       DCHECK(!map->is_undetectable());
       return kFunction;
     case JS_PROXY_TYPE:
       DCHECK(!map->is_undetectable());
-      return kProxy;
+      if (map->is_callable()) return kCallableProxy;
+      return kOtherProxy;
     case MAP_TYPE:
     case ALLOCATION_SITE_TYPE:
     case ACCESSOR_INFO_TYPE:
@@ -297,12 +313,9 @@
     case INTERCEPTOR_INFO_TYPE:
     case CALL_HANDLER_INFO_TYPE:
     case OBJECT_TEMPLATE_INFO_TYPE:
-    case SIGNATURE_INFO_TYPE:
-    case TYPE_SWITCH_INFO_TYPE:
     case ALLOCATION_MEMENTO_TYPE:
     case TYPE_FEEDBACK_INFO_TYPE:
     case ALIASED_ARGUMENTS_ENTRY_TYPE:
-    case BOX_TYPE:
     case PROMISE_RESOLVE_THENABLE_JOB_INFO_TYPE:
     case PROMISE_REACTION_JOB_INFO_TYPE:
     case DEBUG_INFO_TYPE:
@@ -310,8 +323,10 @@
     case CELL_TYPE:
     case WEAK_CELL_TYPE:
     case PROTOTYPE_INFO_TYPE:
+    case TUPLE2_TYPE:
     case TUPLE3_TYPE:
     case CONTEXT_EXTENSION_TYPE:
+    case CONSTANT_ELEMENTS_PAIR_TYPE:
       UNREACHABLE();
       return kNone;
   }
@@ -447,7 +462,7 @@
                                    i::Handle<i::HeapObject> object)
     : TypeBase(kHeapConstant), bitset_(bitset), object_(object) {
   DCHECK(!object->IsHeapNumber());
-  DCHECK(!object->IsString());
+  DCHECK_IMPLIES(object->IsString(), object->IsInternalizedString());
 }
 
 // -----------------------------------------------------------------------------
@@ -823,17 +838,8 @@
     return Range(v, v, zone);
   } else if (value->IsHeapNumber()) {
     return NewConstant(value->Number(), zone);
-  } else if (value->IsString()) {
-    bitset b = BitsetType::Lub(*value);
-    DCHECK(b == BitsetType::kInternalizedString ||
-           b == BitsetType::kOtherString);
-    if (b == BitsetType::kInternalizedString) {
-      return Type::InternalizedString();
-    } else if (b == BitsetType::kOtherString) {
-      return Type::OtherString();
-    } else {
-      UNREACHABLE();
-    }
+  } else if (value->IsString() && !value->IsInternalizedString()) {
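+    // Non-internalized strings only get the coarse OtherString type here;
+    // internalized strings fall through to the HeapConstant case below.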
+    return Type::OtherString();
   }
   return HeapConstant(i::Handle<i::HeapObject>::cast(value), zone);
 }
diff --git a/src/compiler/types.h b/src/compiler/types.h
index e783570..9e55a0b 100644
--- a/src/compiler/types.h
+++ b/src/compiler/types.h
@@ -116,56 +116,75 @@
   V(Symbol,              1u << 12)  \
   V(InternalizedString,  1u << 13)  \
   V(OtherString,         1u << 14)  \
-  V(Simd,                1u << 15)  \
-  V(OtherObject,         1u << 17)  \
-  V(OtherUndetectable,   1u << 16)  \
-  V(Proxy,               1u << 18)  \
-  V(Function,            1u << 19)  \
-  V(Hole,                1u << 20)  \
-  V(OtherInternal,       1u << 21)  \
-  V(ExternalPointer,     1u << 22)  \
+  V(OtherCallable,       1u << 15)  \
+  V(OtherObject,         1u << 16)  \
+  V(OtherUndetectable,   1u << 17)  \
+  V(CallableProxy,       1u << 18)  \
+  V(OtherProxy,          1u << 19)  \
+  V(Function,            1u << 20)  \
+  V(BoundFunction,       1u << 21)  \
+  V(Hole,                1u << 22)  \
+  V(OtherInternal,       1u << 23)  \
+  V(ExternalPointer,     1u << 24)  \
   \
-  V(Signed31,                   kUnsigned30 | kNegative31) \
-  V(Signed32,                   kSigned31 | kOtherUnsigned31 | kOtherSigned32) \
-  V(Signed32OrMinusZero,        kSigned32 | kMinusZero) \
-  V(Signed32OrMinusZeroOrNaN,   kSigned32 | kMinusZero | kNaN) \
-  V(Negative32,                 kNegative31 | kOtherSigned32) \
-  V(Unsigned31,                 kUnsigned30 | kOtherUnsigned31) \
-  V(Unsigned32,                 kUnsigned30 | kOtherUnsigned31 | \
-                                kOtherUnsigned32) \
-  V(Unsigned32OrMinusZero,      kUnsigned32 | kMinusZero) \
-  V(Unsigned32OrMinusZeroOrNaN, kUnsigned32 | kMinusZero | kNaN) \
-  V(Integral32,                 kSigned32 | kUnsigned32) \
-  V(PlainNumber,                kIntegral32 | kOtherNumber) \
-  V(OrderedNumber,              kPlainNumber | kMinusZero) \
-  V(MinusZeroOrNaN,             kMinusZero | kNaN) \
-  V(Number,                     kOrderedNumber | kNaN) \
-  V(String,                     kInternalizedString | kOtherString) \
-  V(UniqueName,                 kSymbol | kInternalizedString) \
-  V(Name,                       kSymbol | kString) \
-  V(BooleanOrNumber,            kBoolean | kNumber) \
-  V(BooleanOrNullOrNumber,      kBooleanOrNumber | kNull) \
-  V(BooleanOrNullOrUndefined,   kBoolean | kNull | kUndefined) \
-  V(NullOrNumber,               kNull | kNumber) \
-  V(NullOrUndefined,            kNull | kUndefined) \
-  V(Undetectable,               kNullOrUndefined | kOtherUndetectable) \
-  V(NumberOrOddball,            kNumber | kNullOrUndefined | kBoolean | kHole) \
-  V(NumberOrSimdOrString,       kNumber | kSimd | kString) \
-  V(NumberOrString,             kNumber | kString) \
-  V(NumberOrUndefined,          kNumber | kUndefined) \
-  V(PlainPrimitive,             kNumberOrString | kBoolean | kNullOrUndefined) \
-  V(Primitive,                  kSymbol | kSimd | kPlainPrimitive) \
-  V(DetectableReceiver,         kFunction | kOtherObject | kProxy) \
-  V(Object,                     kFunction | kOtherObject | kOtherUndetectable) \
-  V(Receiver,                   kObject | kProxy) \
-  V(ReceiverOrUndefined,        kReceiver | kUndefined) \
-  V(StringOrReceiver,           kString | kReceiver) \
-  V(Unique,                     kBoolean | kUniqueName | kNull | kUndefined | \
-                                kReceiver) \
-  V(Internal,                   kHole | kExternalPointer | kOtherInternal) \
-  V(NonInternal,                kPrimitive | kReceiver) \
-  V(NonNumber,                  kUnique | kString | kInternal) \
-  V(Any,                        0xfffffffeu)
+  V(Signed31,                     kUnsigned30 | kNegative31) \
+  V(Signed32,                     kSigned31 | kOtherUnsigned31 | \
+                                  kOtherSigned32) \
+  V(Signed32OrMinusZero,          kSigned32 | kMinusZero) \
+  V(Signed32OrMinusZeroOrNaN,     kSigned32 | kMinusZero | kNaN) \
+  V(Negative32,                   kNegative31 | kOtherSigned32) \
+  V(Unsigned31,                   kUnsigned30 | kOtherUnsigned31) \
+  V(Unsigned32,                   kUnsigned30 | kOtherUnsigned31 | \
+                                  kOtherUnsigned32) \
+  V(Unsigned32OrMinusZero,        kUnsigned32 | kMinusZero) \
+  V(Unsigned32OrMinusZeroOrNaN,   kUnsigned32 | kMinusZero | kNaN) \
+  V(Integral32,                   kSigned32 | kUnsigned32) \
+  V(Integral32OrMinusZeroOrNaN,   kIntegral32 | kMinusZero | kNaN) \
+  V(PlainNumber,                  kIntegral32 | kOtherNumber) \
+  V(OrderedNumber,                kPlainNumber | kMinusZero) \
+  V(MinusZeroOrNaN,               kMinusZero | kNaN) \
+  V(Number,                       kOrderedNumber | kNaN) \
+  V(String,                       kInternalizedString | kOtherString) \
+  V(UniqueName,                   kSymbol | kInternalizedString) \
+  V(Name,                         kSymbol | kString) \
+  V(InternalizedStringOrNull,     kInternalizedString | kNull) \
+  V(BooleanOrNumber,              kBoolean | kNumber) \
+  V(BooleanOrNullOrNumber,        kBooleanOrNumber | kNull) \
+  V(BooleanOrNullOrUndefined,     kBoolean | kNull | kUndefined) \
+  V(Oddball,                      kBooleanOrNullOrUndefined | kHole) \
+  V(NullOrNumber,                 kNull | kNumber) \
+  V(NullOrUndefined,              kNull | kUndefined) \
+  V(Undetectable,                 kNullOrUndefined | kOtherUndetectable) \
+  V(NumberOrOddball,              kNumber | kNullOrUndefined | kBoolean | \
+                                  kHole) \
+  V(NumberOrString,               kNumber | kString) \
+  V(NumberOrUndefined,            kNumber | kUndefined) \
+  V(PlainPrimitive,               kNumberOrString | kBoolean | \
+                                  kNullOrUndefined) \
+  V(Primitive,                    kSymbol | kPlainPrimitive) \
+  V(OtherUndetectableOrUndefined, kOtherUndetectable | kUndefined) \
+  V(Proxy,                        kCallableProxy | kOtherProxy) \
+  V(DetectableCallable,           kFunction | kBoundFunction | \
+                                  kOtherCallable | kCallableProxy) \
+  V(Callable,                     kDetectableCallable | kOtherUndetectable) \
+  V(NonCallable,                  kOtherObject | kOtherProxy) \
+  V(NonCallableOrNull,            kNonCallable | kNull) \
+  V(DetectableObject,             kFunction | kBoundFunction | \
+                                  kOtherCallable | kOtherObject) \
+  V(DetectableReceiver,           kDetectableObject | kProxy) \
+  V(DetectableReceiverOrNull,     kDetectableReceiver | kNull) \
+  V(Object,                       kDetectableObject | kOtherUndetectable) \
+  V(Receiver,                     kObject | kProxy) \
+  V(ReceiverOrUndefined,          kReceiver | kUndefined) \
+  V(ReceiverOrNullOrUndefined,    kReceiver | kNull | kUndefined) \
+  V(SymbolOrReceiver,             kSymbol | kReceiver) \
+  V(StringOrReceiver,             kString | kReceiver) \
+  V(Unique,                       kBoolean | kUniqueName | kNull | \
+                                  kUndefined | kReceiver) \
+  V(Internal,                     kHole | kExternalPointer | kOtherInternal) \
+  V(NonInternal,                  kPrimitive | kReceiver) \
+  V(NonNumber,                    kUnique | kString | kInternal) \
+  V(Any,                          0xfffffffeu)
 
 // clang-format on
 
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
index 30473f2..38e1f0c 100644
--- a/src/compiler/value-numbering-reducer.cc
+++ b/src/compiler/value-numbering-reducer.cc
@@ -18,8 +18,8 @@
 
 size_t HashCode(Node* node) {
   size_t h = base::hash_combine(node->op()->HashCode(), node->InputCount());
-  for (int j = 0; j < node->InputCount(); ++j) {
-    h = base::hash_combine(h, node->InputAt(j)->id());
+  for (Node* input : node->inputs()) {
+    h = base::hash_combine(h, input->id());
   }
   return h;
 }
@@ -32,10 +32,17 @@
   DCHECK_NOT_NULL(b->op());
   if (!a->op()->Equals(b->op())) return false;
   if (a->InputCount() != b->InputCount()) return false;
-  for (int j = 0; j < a->InputCount(); ++j) {
-    DCHECK_NOT_NULL(a->InputAt(j));
-    DCHECK_NOT_NULL(b->InputAt(j));
-    if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+  Node::Inputs aInputs = a->inputs();
+  Node::Inputs bInputs = b->inputs();
+
+  auto aIt = aInputs.begin();
+  auto bIt = bInputs.begin();
+  auto aEnd = aInputs.end();
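+  // The input counts were already checked to be equal above, so exhausting
+  // aInputs also exhausts bInputs; a single end iterator suffices.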
+
+  for (; aIt != aEnd; ++aIt, ++bIt) {
+    DCHECK_NOT_NULL(*aIt);
+    DCHECK_NOT_NULL(*bIt);
+    if ((*aIt)->id() != (*bIt)->id()) return false;
   }
   return true;
 }
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
index 872305b..7f63ceb 100644
--- a/src/compiler/verifier.cc
+++ b/src/compiler/verifier.cc
@@ -14,11 +14,12 @@
 #include "src/compiler/all-nodes.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/node.h"
+#include "src/compiler/js-operator.h"
 #include "src/compiler/node-properties.h"
+#include "src/compiler/node.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/operator.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/ostreams.h"
@@ -150,7 +151,7 @@
                   "control");
     }
 
-    // Verify that no-no-throw nodes only have IfSuccess/IfException control
+    // Verify that nodes that can throw only have IfSuccess/IfException control
     // uses.
     if (!node->op()->HasProperty(Operator::kNoThrow)) {
       int count_success = 0, count_exception = 0;
@@ -206,6 +207,8 @@
       }
       CHECK_EQ(1, count_true);
       CHECK_EQ(1, count_false);
+      // The condition must be a Boolean.
+      CheckValueInputIs(node, 0, Type::Boolean());
       // Type is empty.
       CheckNotTyped(node);
       break;
@@ -283,6 +286,11 @@
       // Type is empty.
       CheckNotTyped(node);
       break;
+    case IrOpcode::kTrapIf:
+    case IrOpcode::kTrapUnless:
+      // Type is empty.
+      CheckNotTyped(node);
+      break;
     case IrOpcode::kDeoptimize:
     case IrOpcode::kReturn:
     case IrOpcode::kThrow:
@@ -402,6 +410,10 @@
       CHECK_EQ(0, effect_count);
       CHECK_EQ(0, control_count);
       CHECK_EQ(3, value_count);
+      // The condition must be a Boolean.
+      CheckValueInputIs(node, 0, Type::Boolean());
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
       break;
     }
     case IrOpcode::kPhi: {
@@ -484,6 +496,7 @@
     }
     case IrOpcode::kStateValues:
     case IrOpcode::kTypedStateValues:
+    case IrOpcode::kArgumentsObjectState:
     case IrOpcode::kObjectState:
     case IrOpcode::kTypedObjectState:
       // TODO(jarin): what are the constraints on these?
@@ -590,16 +603,43 @@
       CheckTypeIs(node, Type::OtherObject());
       break;
     case IrOpcode::kJSLoadProperty:
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
+      CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSLoadNamed:
+      // Type can be anything.
+      CheckTypeIs(node, Type::Any());
+      CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSLoadGlobal:
       // Type can be anything.
       CheckTypeIs(node, Type::Any());
+      CHECK(LoadGlobalParametersOf(node->op()).feedback().IsValid());
       break;
     case IrOpcode::kJSStoreProperty:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(PropertyAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSStoreNamed:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(NamedAccessOf(node->op()).feedback().IsValid());
+      break;
     case IrOpcode::kJSStoreGlobal:
       // Type is empty.
       CheckNotTyped(node);
+      CHECK(StoreGlobalParametersOf(node->op()).feedback().IsValid());
+      break;
+    case IrOpcode::kJSStoreNamedOwn:
+      // Type is empty.
+      CheckNotTyped(node);
+      CHECK(StoreNamedOwnParametersOf(node->op()).feedback().IsValid());
+      break;
+    case IrOpcode::kJSStoreDataPropertyInLiteral:
+      // Type is empty.
+      CheckNotTyped(node);
       break;
     case IrOpcode::kJSDeleteProperty:
     case IrOpcode::kJSHasProperty:
@@ -608,9 +648,20 @@
       // Type is Boolean.
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kJSClassOf:
+      // Type is InternalizedString \/ Null.
+      CheckTypeIs(node, Type::InternalizedStringOrNull());
+      break;
     case IrOpcode::kJSTypeOf:
-      // Type is String.
-      CheckTypeIs(node, Type::String());
+      // Type is InternalizedString.
+      CheckTypeIs(node, Type::InternalizedString());
+      break;
+    case IrOpcode::kJSGetSuperConstructor:
+      // We don't check the input for Type::Function because
+      // this_function can be context-allocated.
+      // Any -> Callable.
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::Callable());
       break;
 
     case IrOpcode::kJSLoadContext:
@@ -635,12 +686,15 @@
       break;
     }
 
-    case IrOpcode::kJSCallConstruct:
+    case IrOpcode::kJSConstruct:
+    case IrOpcode::kJSConstructWithSpread:
     case IrOpcode::kJSConvertReceiver:
       // Type is Receiver.
       CheckTypeIs(node, Type::Receiver());
       break;
-    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallForwardVarargs:
+    case IrOpcode::kJSCall:
+    case IrOpcode::kJSCallWithSpread:
     case IrOpcode::kJSCallRuntime:
       // Type can be anything.
       CheckTypeIs(node, Type::Any());
@@ -680,6 +734,7 @@
       break;
 
     case IrOpcode::kJSStackCheck:
+    case IrOpcode::kJSDebugger:
       // Type is empty.
       CheckNotTyped(node);
       break;
@@ -861,6 +916,12 @@
       CheckValueInputIs(node, 1, Type::String());
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kStringCharAt:
+      // (String, Unsigned32) -> String
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::Unsigned32());
+      CheckTypeIs(node, Type::String());
+      break;
     case IrOpcode::kStringCharCodeAt:
       // (String, Unsigned32) -> UnsignedSmall
       CheckValueInputIs(node, 0, Type::String());
@@ -877,13 +938,22 @@
       CheckValueInputIs(node, 0, Type::Number());
       CheckTypeIs(node, Type::String());
       break;
-    case IrOpcode::kReferenceEqual: {
+    case IrOpcode::kStringIndexOf:
+      // (String, String, SignedSmall) -> SignedSmall
+      CheckValueInputIs(node, 0, Type::String());
+      CheckValueInputIs(node, 1, Type::String());
+      CheckValueInputIs(node, 2, Type::SignedSmall());
+      CheckTypeIs(node, Type::SignedSmall());
+      break;
+
+    case IrOpcode::kReferenceEqual:
       // (Unique, Any) -> Boolean  and
       // (Any, Unique) -> Boolean
       CheckTypeIs(node, Type::Boolean());
       break;
-    }
-    case IrOpcode::kObjectIsCallable:
+
+    case IrOpcode::kObjectIsDetectableCallable:
+    case IrOpcode::kObjectIsNonCallable:
     case IrOpcode::kObjectIsNumber:
     case IrOpcode::kObjectIsReceiver:
     case IrOpcode::kObjectIsSmi:
@@ -893,6 +963,10 @@
       CheckValueInputIs(node, 0, Type::Any());
       CheckTypeIs(node, Type::Boolean());
       break;
+    case IrOpcode::kNewRestParameterElements:
+    case IrOpcode::kNewUnmappedArgumentsElements:
+      CheckTypeIs(node, Type::OtherInternal());
+      break;
     case IrOpcode::kAllocate:
       CheckValueInputIs(node, 0, Type::PlainNumber());
       break;
@@ -910,8 +984,6 @@
       break;
     case IrOpcode::kTransitionElementsKind:
       CheckValueInputIs(node, 0, Type::Any());
-      CheckValueInputIs(node, 1, Type::Internal());
-      CheckValueInputIs(node, 2, Type::Internal());
       CheckNotTyped(node);
       break;
 
@@ -951,6 +1023,8 @@
       // CheckTypeIs(node, to));
       break;
     }
+    case IrOpcode::kChangeTaggedToTaggedSigned:
+      break;
     case IrOpcode::kTruncateTaggedToFloat64: {
       // NumberOrUndefined /\ Tagged -> Number /\ UntaggedFloat64
       // TODO(neis): Activate once ChangeRepresentation works in typer.
@@ -1041,6 +1115,10 @@
       CheckValueInputIs(node, 0, Type::Boolean());
       CheckNotTyped(node);
       break;
+    case IrOpcode::kCheckInternalizedString:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::InternalizedString());
+      break;
     case IrOpcode::kCheckMaps:
       // (Any, Internal, ..., Internal) -> Any
       CheckValueInputIs(node, 0, Type::Any());
@@ -1053,6 +1131,10 @@
       CheckValueInputIs(node, 0, Type::Any());
       CheckTypeIs(node, Type::Number());
       break;
+    case IrOpcode::kCheckReceiver:
+      CheckValueInputIs(node, 0, Type::Any());
+      CheckTypeIs(node, Type::Receiver());
+      break;
     case IrOpcode::kCheckSmi:
       CheckValueInputIs(node, 0, Type::Any());
       break;
@@ -1140,6 +1222,7 @@
     // -----------------------
     case IrOpcode::kLoad:
     case IrOpcode::kProtectedLoad:
+    case IrOpcode::kProtectedStore:
     case IrOpcode::kStore:
     case IrOpcode::kStackSlot:
     case IrOpcode::kWord32And:
diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc
index 1b61c15..168178e 100644
--- a/src/compiler/wasm-compiler.cc
+++ b/src/compiler/wasm-compiler.cc
@@ -6,11 +6,12 @@
 
 #include <memory>
 
-#include "src/isolate-inl.h"
-
+#include "src/assembler-inl.h"
 #include "src/base/platform/elapsed-timer.h"
 #include "src/base/platform/platform.h"
-
+#include "src/builtins/builtins.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/compiler-source-position-table.h"
@@ -27,15 +28,15 @@
 #include "src/compiler/pipeline.h"
 #include "src/compiler/simd-scalar-lowering.h"
 #include "src/compiler/zone-stats.h"
-
-#include "src/code-factory.h"
-#include "src/code-stubs.h"
 #include "src/factory.h"
+#include "src/isolate-inl.h"
 #include "src/log-inl.h"
-
-#include "src/wasm/ast-decoder.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-module.h"
+#include "src/wasm/wasm-objects.h"
 #include "src/wasm/wasm-opcodes.h"
+#include "src/wasm/wasm-text.h"
 
 // TODO(titzer): pull WASM_64 up to a common header.
 #if !V8_TARGET_ARCH_32_BIT || V8_TARGET_ARCH_X64
@@ -64,13 +65,12 @@
   }
 }
 
-Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
-                         Handle<Context> context, Node** parameters,
-                         int parameter_count, Node** effect_ptr,
-                         Node* control) {
-  // At the moment we only allow 2 parameters. If more parameters are needed,
-  // then the size of {inputs} below has to be increased accordingly.
-  DCHECK(parameter_count <= 2);
+// Only call this function for code which is not reused across instantiations,
+// as we do not patch the embedded context.
+Node* BuildCallToRuntimeWithContext(Runtime::FunctionId f, JSGraph* jsgraph,
+                                    Node* context, Node** parameters,
+                                    int parameter_count, Node** effect_ptr,
+                                    Node* control) {
   const Runtime::Function* fun = Runtime::FunctionForId(f);
   CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
       jsgraph->zone(), f, fun->nargs, Operator::kNoProperties,
@@ -78,7 +78,11 @@
   // CEntryStubConstant nodes have to be created and cached in the main
   // thread. At the moment this is only done for CEntryStubConstant(1).
   DCHECK_EQ(1, fun->result_size);
-  Node* inputs[8];
+  // At the moment we only allow 3 parameters. If more parameters are needed,
+  // increase this constant accordingly.
+  static const int kMaxParams = 3;
+  DCHECK_GE(kMaxParams, parameter_count);
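+  // Besides the parameters, the inputs are the CEntryStub target, the
+  // external reference, the arity, the context, and the effect and control
+  // dependencies: six fixed inputs in total.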
+  Node* inputs[kMaxParams + 6];
   int count = 0;
   inputs[count++] = jsgraph->CEntryStubConstant(fun->result_size);
   for (int i = 0; i < parameter_count; i++) {
@@ -87,7 +91,7 @@
   inputs[count++] = jsgraph->ExternalConstant(
       ExternalReference(f, jsgraph->isolate()));         // ref
   inputs[count++] = jsgraph->Int32Constant(fun->nargs);  // arity
-  inputs[count++] = jsgraph->HeapConstant(context);      // context
+  inputs[count++] = context;                             // context
   inputs[count++] = *effect_ptr;
   inputs[count++] = control;
 
@@ -97,8 +101,23 @@
   return node;
 }
 
+Node* BuildCallToRuntime(Runtime::FunctionId f, JSGraph* jsgraph,
+                         Node** parameters, int parameter_count,
+                         Node** effect_ptr, Node* control) {
+  return BuildCallToRuntimeWithContext(f, jsgraph, jsgraph->NoContextConstant(),
+                                       parameters, parameter_count, effect_ptr,
+                                       control);
+}
+
 }  // namespace
 
+// TODO(eholk): Support trap handlers on other platforms.
+#if V8_TARGET_ARCH_X64 && V8_OS_LINUX
+const bool kTrapHandlerSupported = true;
+#else
+const bool kTrapHandlerSupported = false;
+#endif
+
 // A helper that handles building graph fragments for trapping.
 // To avoid generating a ton of redundant code that just calls the runtime
 // to trap, we generate a per-trap-reason block of code that all trap sites
@@ -159,21 +178,70 @@
     return TrapIfEq64(reason, node, 0, position);
   }
 
+  Builtins::Name GetBuiltinIdForTrap(wasm::TrapReason reason) {
+    if (builder_->module_ && !builder_->module_->instance->context.is_null()) {
+      switch (reason) {
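+        // Every trap reason has a matching ThrowWasm* builtin; e.g.
+        // wasm::kTrapFuncInvalid maps to Builtins::kThrowWasmTrapFuncInvalid.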
+#define TRAPREASON_TO_MESSAGE(name) \
+  case wasm::k##name:               \
+    return Builtins::kThrowWasm##name;
+        FOREACH_WASM_TRAPREASON(TRAPREASON_TO_MESSAGE)
+#undef TRAPREASON_TO_MESSAGE
+        default:
+          UNREACHABLE();
+          return Builtins::builtin_count;
+      }
+    } else {
+      // We use Builtins::builtin_count as a marker to tell the code generator
+      // to generate a call to a testing C function instead of a runtime
+      // function. This code should only be called from a cctest.
+      return Builtins::builtin_count;
+    }
+  }
+
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM ||      \
+    V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 ||    \
+    V8_TARGET_ARCH_S390X || V8_TARGET_ARCH_X87
+#define WASM_TRAP_IF_SUPPORTED
+#endif
+
   // Add a trap if {cond} is true.
   void AddTrapIfTrue(wasm::TrapReason reason, Node* cond,
                      wasm::WasmCodePosition position) {
-    AddTrapIf(reason, cond, true, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+    if (FLAG_wasm_trap_if) {
+      int32_t trap_id = GetBuiltinIdForTrap(reason);
+      Node* node = graph()->NewNode(common()->TrapIf(trap_id), cond,
+                                    builder_->Effect(), builder_->Control());
+      *builder_->control_ = node;
+      builder_->SetSourcePosition(node, position);
+      return;
+    }
+#endif  // WASM_TRAP_IF_SUPPORTED
+    BuildTrapIf(reason, cond, true, position);
   }
 
   // Add a trap if {cond} is false.
   void AddTrapIfFalse(wasm::TrapReason reason, Node* cond,
                       wasm::WasmCodePosition position) {
-    AddTrapIf(reason, cond, false, position);
+#ifdef WASM_TRAP_IF_SUPPORTED
+    if (FLAG_wasm_trap_if) {
+      int32_t trap_id = GetBuiltinIdForTrap(reason);
+
+      Node* node = graph()->NewNode(common()->TrapUnless(trap_id), cond,
+                                    builder_->Effect(), builder_->Control());
+      *builder_->control_ = node;
+      builder_->SetSourcePosition(node, position);
+      return;
+    }
+#endif  // WASM_TRAP_IF_SUPPORTED
+
+    BuildTrapIf(reason, cond, false, position);
   }
 
   // Add a trap if {cond} is true or false according to {iftrue}.
-  void AddTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
-                 wasm::WasmCodePosition position) {
+  void BuildTrapIf(wasm::TrapReason reason, Node* cond, bool iftrue,
+                   wasm::WasmCodePosition position) {
     Node** effect_ptr = builder_->effect_;
     Node** control_ptr = builder_->control_;
     Node* before = *effect_ptr;
@@ -196,18 +264,18 @@
     }
   }
 
-  Node* GetTrapValue(wasm::LocalType type) {
+  Node* GetTrapValue(wasm::ValueType type) {
     switch (type) {
-      case wasm::kAstI32:
+      case wasm::kWasmI32:
         return jsgraph()->Int32Constant(0xdeadbeef);
-      case wasm::kAstI64:
+      case wasm::kWasmI64:
         return jsgraph()->Int64Constant(0xdeadbeefdeadbeef);
-      case wasm::kAstF32:
+      case wasm::kWasmF32:
         return jsgraph()->Float32Constant(bit_cast<float>(0xdeadbeef));
-      case wasm::kAstF64:
+      case wasm::kWasmF64:
         return jsgraph()->Float64Constant(bit_cast<double>(0xdeadbeefdeadbeef));
         break;
-      case wasm::kAstS128:
+      case wasm::kWasmS128:
         return builder_->CreateS128Value(0xdeadbeef);
         break;
       default:
@@ -246,7 +314,6 @@
   }
 
   void BuildTrapCode(Node* reason_node, Node* position_node) {
-    Node* end;
     Node** control_ptr = builder_->control_;
     Node** effect_ptr = builder_->effect_;
     wasm::ModuleEnv* module = builder_->module_;
@@ -268,8 +335,7 @@
     if (module && !module->instance->context.is_null()) {
       Node* parameters[] = {trap_reason_smi,     // message id
                             trap_position_smi};  // byte position
-      BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(),
-                         module->instance->context, parameters,
+      BuildCallToRuntime(Runtime::kThrowWasmError, jsgraph(), parameters,
                          arraysize(parameters), effect_ptr, *control_ptr);
     }
     if (false) {
@@ -277,36 +343,36 @@
       Node* thrw =
           graph()->NewNode(common()->Throw(), jsgraph()->ZeroConstant(),
                            *effect_ptr, *control_ptr);
-      end = thrw;
+      MergeControlToEnd(jsgraph(), thrw);
     } else {
       // End the control flow with returning 0xdeadbeef
       Node* ret_value = GetTrapValue(builder_->GetFunctionSignature());
-      end = graph()->NewNode(jsgraph()->common()->Return(),
-                             jsgraph()->Int32Constant(0), ret_value,
-                             *effect_ptr, *control_ptr);
+      builder_->Return(ret_value);
     }
-
-    MergeControlToEnd(jsgraph(), end);
   }
 };
 
 WasmGraphBuilder::WasmGraphBuilder(
-    Zone* zone, JSGraph* jsgraph, wasm::FunctionSig* function_signature,
+    wasm::ModuleEnv* module_env, Zone* zone, JSGraph* jsgraph,
+    wasm::FunctionSig* sig,
     compiler::SourcePositionTable* source_position_table)
     : zone_(zone),
       jsgraph_(jsgraph),
-      module_(nullptr),
-      mem_buffer_(nullptr),
-      mem_size_(nullptr),
+      module_(module_env),
+      signature_tables_(zone),
       function_tables_(zone),
       function_table_sizes_(zone),
-      control_(nullptr),
-      effect_(nullptr),
       cur_buffer_(def_buffer_),
       cur_bufsize_(kDefaultBufferSize),
       trap_(new (zone) WasmTrapHelper(this)),
-      function_signature_(function_signature),
+      sig_(sig),
       source_position_table_(source_position_table) {
+  for (size_t i = 0; i < sig->parameter_count(); i++) {
+    if (sig->GetParam(i) == wasm::kWasmS128) has_simd_ = true;
+  }
+  for (size_t i = 0; i < sig->return_count(); i++) {
+    if (sig->GetReturn(i) == wasm::kWasmS128) has_simd_ = true;
+  }
   DCHECK_NOT_NULL(jsgraph_);
 }
 
@@ -318,7 +384,7 @@
   return start;
 }
 
-Node* WasmGraphBuilder::Param(unsigned index, wasm::LocalType type) {
+Node* WasmGraphBuilder::Param(unsigned index) {
   return graph()->NewNode(jsgraph()->common()->Parameter(index),
                           graph()->start());
 }
@@ -376,7 +442,7 @@
   return graph()->NewNode(jsgraph()->common()->Merge(count), count, controls);
 }
 
-Node* WasmGraphBuilder::Phi(wasm::LocalType type, unsigned count, Node** vals,
+Node* WasmGraphBuilder::Phi(wasm::ValueType type, unsigned count, Node** vals,
                             Node* control) {
   DCHECK(IrOpcode::IsMergeOpcode(control->opcode()));
   Node** buf = Realloc(vals, count, count + 1);
@@ -412,43 +478,45 @@
 
 void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
                                   Node** effect, Node** control) {
-  if (effect == nullptr) {
-    effect = effect_;
-  }
-  if (control == nullptr) {
-    control = control_;
-  }
+  if (FLAG_wasm_no_stack_checks) return;
   // We do not generate stack checks for cctests.
-  if (module_ && !module_->instance->context.is_null()) {
-    Node* limit = graph()->NewNode(
-        jsgraph()->machine()->Load(MachineType::Pointer()),
-        jsgraph()->ExternalConstant(
-            ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
-        jsgraph()->IntPtrConstant(0), *effect, *control);
-    Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
+  if (!module_ || module_->instance->context.is_null()) return;
+  if (effect == nullptr) effect = effect_;
+  if (control == nullptr) control = control_;
 
-    Node* check =
-        graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
+  Node* limit = graph()->NewNode(
+      jsgraph()->machine()->Load(MachineType::Pointer()),
+      jsgraph()->ExternalConstant(
+          ExternalReference::address_of_stack_limit(jsgraph()->isolate())),
+      jsgraph()->IntPtrConstant(0), *effect, *control);
+  Node* pointer = graph()->NewNode(jsgraph()->machine()->LoadStackPointer());
 
-    Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
-    stack_check.Chain(*control);
-    Node* effect_true = *effect;
+  Node* check =
+      graph()->NewNode(jsgraph()->machine()->UintLessThan(), limit, pointer);
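+  // The stack grows downwards, so in the common case the stack pointer is
+  // still above the limit and the check is true; the WasmStackGuard call is
+  // only reached on overflow.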
 
-    Node* effect_false;
-    // Generate a call to the runtime if there is a stack check failure.
-    {
-      Node* node = BuildCallToRuntime(Runtime::kStackGuard, jsgraph(),
-                                      module_->instance->context, nullptr, 0,
-                                      effect, stack_check.if_false);
-      effect_false = node;
-    }
+  Diamond stack_check(graph(), jsgraph()->common(), check, BranchHint::kTrue);
+  stack_check.Chain(*control);
+  Node* effect_true = *effect;
 
-    Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2),
-                                  effect_true, effect_false, stack_check.merge);
+  Handle<Code> code = jsgraph()->isolate()->builtins()->WasmStackGuard();
+  CallInterfaceDescriptor idesc =
+      WasmRuntimeCallDescriptor(jsgraph()->isolate());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      jsgraph()->isolate(), jsgraph()->zone(), idesc, 0,
+      CallDescriptor::kNoFlags, Operator::kNoProperties);
+  Node* stub_code = jsgraph()->HeapConstant(code);
 
-    *control = stack_check.merge;
-    *effect = ephi;
-  }
+  Node* context = jsgraph()->NoContextConstant();
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
+                                context, *effect, stack_check.if_false);
+
+  SetSourcePosition(call, position);
+
+  Node* ephi = graph()->NewNode(jsgraph()->common()->EffectPhi(2), effect_true,
+                                call, stack_check.merge);
+
+  *control = stack_check.merge;
+  *effect = ephi;
 }
 
 Node* WasmGraphBuilder::Binop(wasm::WasmOpcode opcode, Node* left, Node* right,
@@ -1042,9 +1110,18 @@
   DCHECK_NOT_NULL(*control_);
   DCHECK_NOT_NULL(*effect_);
 
-  Node** buf = Realloc(vals, count, count + 3);
-  memmove(buf + 1, buf, sizeof(void*) * count);
+  static const int kStackAllocatedNodeBufferSize = 8;
+  Node* stack_buffer[kStackAllocatedNodeBufferSize];
+  std::vector<Node*> heap_buffer;
+
+  Node** buf = stack_buffer;
+  if (count + 3 > kStackAllocatedNodeBufferSize) {
+    heap_buffer.resize(count + 3);
+    buf = heap_buffer.data();
+  }
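+  // With a buffer size of 8, up to 5 return values (plus the leading zero
+  // pop count, the effect and the control input) fit on the stack; larger
+  // counts fall back to the heap-allocated vector.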
+
   buf[0] = jsgraph()->Int32Constant(0);
+  memcpy(buf + 1, vals, sizeof(void*) * count);
   buf[count + 1] = *effect_;
   buf[count + 2] = *control_;
   Node* ret =
@@ -1107,7 +1184,7 @@
 }
 
 Node* WasmGraphBuilder::BuildChangeEndianness(Node* node, MachineType memtype,
-                                              wasm::LocalType wasmtype) {
+                                              wasm::ValueType wasmtype) {
   Node* result;
   Node* value = node;
   MachineOperatorBuilder* m = jsgraph()->machine();
@@ -1223,7 +1300,7 @@
       // Perform sign extension using the following trick
       // result = (x << machine_width - type_width) >> (machine_width -
       // type_width)
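       // For example, sign-extending an 8-bit value in a 32-bit word computes
       // (x << 24) >> 24 with an arithmetic right shift, which replicates
       // bit 7 across the upper 24 bits.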
-      if (wasmtype == wasm::kAstI64) {
+      if (wasmtype == wasm::kWasmI64) {
         shiftBitCount = jsgraph()->Int32Constant(64 - valueSizeInBits);
         result = graph()->NewNode(
             m->Word64Sar(),
@@ -1231,7 +1308,7 @@
                              graph()->NewNode(m->ChangeInt32ToInt64(), result),
                              shiftBitCount),
             shiftBitCount);
-      } else if (wasmtype == wasm::kAstI32) {
+      } else if (wasmtype == wasm::kWasmI32) {
         shiftBitCount = jsgraph()->Int32Constant(32 - valueSizeInBits);
         result = graph()->NewNode(
             m->Word32Sar(),
@@ -1714,37 +1791,24 @@
 Node* WasmGraphBuilder::GrowMemory(Node* input) {
   Diamond check_input_range(
       graph(), jsgraph()->common(),
-      graph()->NewNode(
-          jsgraph()->machine()->Uint32LessThanOrEqual(), input,
-          jsgraph()->Uint32Constant(wasm::WasmModule::kV8MaxPages)),
+      graph()->NewNode(jsgraph()->machine()->Uint32LessThanOrEqual(), input,
+                       jsgraph()->Uint32Constant(FLAG_wasm_max_mem_pages)),
       BranchHint::kTrue);
 
   check_input_range.Chain(*control_);
 
-  Runtime::FunctionId function_id = Runtime::kWasmGrowMemory;
-  const Runtime::Function* function = Runtime::FunctionForId(function_id);
-  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(
-      jsgraph()->zone(), function_id, function->nargs, Operator::kNoThrow,
-      CallDescriptor::kNoFlags);
-  wasm::ModuleEnv* module = module_;
-  input = BuildChangeUint32ToSmi(input);
-  Node* inputs[] = {
-      jsgraph()->CEntryStubConstant(function->result_size), input,  // C entry
-      jsgraph()->ExternalConstant(
-          ExternalReference(function_id, jsgraph()->isolate())),  // ref
-      jsgraph()->Int32Constant(function->nargs),                  // arity
-      jsgraph()->HeapConstant(module->instance->context),         // context
-      *effect_,
-      check_input_range.if_true};
-  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc),
-                                static_cast<int>(arraysize(inputs)), inputs);
+  Node* parameters[] = {BuildChangeUint32ToSmi(input)};
+  Node* old_effect = *effect_;
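+  // Capture the effect before the runtime call so that the EffectPhi below
+  // merges the unmodified effect on the out-of-range path.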
+  Node* call = BuildCallToRuntime(Runtime::kWasmGrowMemory, jsgraph(),
+                                  parameters, arraysize(parameters), effect_,
+                                  check_input_range.if_true);
 
   Node* result = BuildChangeSmiToInt32(call);
 
   result = check_input_range.Phi(MachineRepresentation::kWord32, result,
                                  jsgraph()->Int32Constant(-1));
-  *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call, *effect_,
-                              check_input_range.merge);
+  *effect_ = graph()->NewNode(jsgraph()->common()->EffectPhi(2), call,
+                              old_effect, check_input_range.merge);
   *control_ = check_input_range.merge;
   return result;
 }
@@ -1767,8 +1831,7 @@
       graph()->NewNode(machine->Word32And(), input, Int32Constant(0xFFFFu)));
 
   Node* parameters[] = {lower, upper};  // thrown value
-  return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(),
-                            module_->instance->context, parameters,
+  return BuildCallToRuntime(Runtime::kWasmThrow, jsgraph(), parameters,
                             arraysize(parameters), effect_, *control_);
 }
 
@@ -1778,8 +1841,7 @@
   Node* parameters[] = {input};  // caught value
   Node* value =
       BuildCallToRuntime(Runtime::kWasmGetCaughtExceptionValue, jsgraph(),
-                         module_->instance->context, parameters,
-                         arraysize(parameters), effect_, *control_);
+                         parameters, arraysize(parameters), effect_, *control_);
 
   Node* is_smi;
   Node* is_heap;
@@ -1911,36 +1973,101 @@
 }
 
 Node* WasmGraphBuilder::BuildI32AsmjsRemS(Node* left, Node* right) {
+  CommonOperatorBuilder* c = jsgraph()->common();
   MachineOperatorBuilder* m = jsgraph()->machine();
+  Node* const zero = jsgraph()->Int32Constant(0);
 
   Int32Matcher mr(right);
   if (mr.HasValue()) {
-    if (mr.Value() == 0) {
-      return jsgraph()->Int32Constant(0);
-    } else if (mr.Value() == -1) {
-      return jsgraph()->Int32Constant(0);
+    if (mr.Value() == 0 || mr.Value() == -1) {
+      return zero;
     }
     return graph()->NewNode(m->Int32Mod(), left, right, *control_);
   }
 
-  // asm.js semantics return 0 on divide or mod by zero.
-  // Explicit check for x % 0.
-  Diamond z(
-      graph(), jsgraph()->common(),
-      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(0)),
-      BranchHint::kFalse);
+  // General case for signed integer modulus, with optimization for (unknown)
+  // power of 2 right hand side.
+  //
+  //   if 0 < right then
+  //     msk = right - 1
+  //     if right & msk != 0 then
+  //       left % right
+  //     else
+  //       if left < 0 then
+  //         -(-left & msk)
+  //       else
+  //         left & msk
+  //   else
+  //     if right < -1 then
+  //       left % right
+  //     else
+  //       zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
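+  //
+  // For example, with right = 8: msk = 7 and right & msk == 0, so for
+  // left = -13 the result is -(-(-13) & 7) = -(13 & 7) = -5, matching the
+  // truncated semantics of Int32Mod.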
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
 
-  // Explicit check for x % -1.
-  Diamond d(
-      graph(), jsgraph()->common(),
-      graph()->NewNode(m->Word32Equal(), right, jsgraph()->Int32Constant(-1)),
-      BranchHint::kFalse);
-  d.Chain(z.if_false);
+  const Operator* const merge_op = c->Merge(2);
+  const Operator* const phi_op = c->Phi(MachineRepresentation::kWord32, 2);
 
-  return z.Phi(
-      MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-      d.Phi(MachineRepresentation::kWord32, jsgraph()->Int32Constant(0),
-            graph()->NewNode(m->Int32Mod(), left, right, d.if_false)));
+  Node* check0 = graph()->NewNode(m->Int32LessThan(), zero, right);
+  Node* branch0 =
+      graph()->NewNode(c->Branch(BranchHint::kTrue), check0, graph()->start());
+
+  Node* if_true0 = graph()->NewNode(c->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(m->Int32Add(), right, minus_one);
+
+    Node* check1 = graph()->NewNode(m->Word32And(), right, msk);
+    Node* branch1 = graph()->NewNode(c->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+    Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+    Node* false1;
+    {
+      Node* check2 = graph()->NewNode(m->Int32LessThan(), left, zero);
+      Node* branch2 =
+          graph()->NewNode(c->Branch(BranchHint::kFalse), check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(c->IfTrue(), branch2);
+      Node* true2 = graph()->NewNode(
+          m->Int32Sub(), zero,
+          graph()->NewNode(m->Word32And(),
+                           graph()->NewNode(m->Int32Sub(), zero, left), msk));
+
+      Node* if_false2 = graph()->NewNode(c->IfFalse(), branch2);
+      Node* false2 = graph()->NewNode(m->Word32And(), left, msk);
+
+      if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+      false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+    }
+
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(c->IfFalse(), branch0);
+  Node* false0;
+  {
+    Node* check1 = graph()->NewNode(m->Int32LessThan(), right, minus_one);
+    Node* branch1 =
+        graph()->NewNode(c->Branch(BranchHint::kTrue), check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(c->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(m->Int32Mod(), left, right, if_true1);
+
+    Node* if_false1 = graph()->NewNode(c->IfFalse(), branch1);
+    Node* false1 = zero;
+
+    if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
 }
 
 Node* WasmGraphBuilder::BuildI32AsmjsDivU(Node* left, Node* right) {
@@ -2016,6 +2143,8 @@
             graph()->NewNode(jsgraph()->machine()->Word64Equal(), right,
                              jsgraph()->Int64Constant(-1)));
 
+  d.Chain(*control_);
+
   Node* rem = graph()->NewNode(jsgraph()->machine()->Int64Mod(), left, right,
                                d.if_false);
 
@@ -2179,6 +2308,7 @@
   Node* in_bounds = graph()->NewNode(machine->Uint32LessThan(), key, size);
   trap_->AddTrapIfFalse(wasm::kTrapFuncInvalid, in_bounds, position);
   Node* table = function_tables_[table_index];
+  Node* signatures = signature_tables_[table_index];
 
   // Load signature from the table and check.
   // The table is a FixedArray; signatures are encoded as SMIs.
@@ -2187,7 +2317,7 @@
   const int fixed_offset = access.header_size - access.tag();
   {
     Node* load_sig = graph()->NewNode(
-        machine->Load(MachineType::AnyTagged()), table,
+        machine->Load(MachineType::AnyTagged()), signatures,
         graph()->NewNode(machine->Int32Add(),
                          graph()->NewNode(machine->Word32Shl(), key,
                                           Int32Constant(kPointerSizeLog2)),
@@ -2202,14 +2332,12 @@
   }
 
   // Load code object from the table.
-  uint32_t table_size = module_->module->function_tables[table_index].min_size;
-  uint32_t offset = fixed_offset + kPointerSize * table_size;
   Node* load_code = graph()->NewNode(
       machine->Load(MachineType::AnyTagged()), table,
       graph()->NewNode(machine->Int32Add(),
                        graph()->NewNode(machine->Word32Shl(), key,
                                         Int32Constant(kPointerSizeLog2)),
-                       Uint32Constant(offset)),
+                       Uint32Constant(fixed_offset)),
       *effect_, *control_);
 
   args[0] = load_code;
@@ -2342,24 +2470,20 @@
   return value;
 }
 
-Node* WasmGraphBuilder::ToJS(Node* node, wasm::LocalType type) {
+Node* WasmGraphBuilder::ToJS(Node* node, wasm::ValueType type) {
   switch (type) {
-    case wasm::kAstI32:
+    case wasm::kWasmI32:
       return BuildChangeInt32ToTagged(node);
-    case wasm::kAstS128:
-    case wasm::kAstI64:
-      // Throw a TypeError. The native context is good enough here because we
-      // only throw a TypeError.
-      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
-                                jsgraph()->isolate()->native_context(), nullptr,
-                                0, effect_, *control_);
-    case wasm::kAstF32:
+    case wasm::kWasmS128:
+    case wasm::kWasmI64:
+      UNREACHABLE();
+    case wasm::kWasmF32:
       node = graph()->NewNode(jsgraph()->machine()->ChangeFloat32ToFloat64(),
                               node);
       return BuildChangeFloat64ToTagged(node);
-    case wasm::kAstF64:
+    case wasm::kWasmF64:
       return BuildChangeFloat64ToTagged(node);
-    case wasm::kAstStmt:
+    case wasm::kWasmStmt:
       return jsgraph()->UndefinedConstant();
     default:
       UNREACHABLE();
@@ -2367,8 +2491,7 @@
   }
 }
 
-Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context,
-                                                Node* effect, Node* control) {
+Node* WasmGraphBuilder::BuildJavaScriptToNumber(Node* node, Node* context) {
   Callable callable = CodeFactory::ToNumber(jsgraph()->isolate());
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
       jsgraph()->isolate(), jsgraph()->zone(), callable.descriptor(), 0,
@@ -2376,7 +2499,9 @@
   Node* stub_code = jsgraph()->HeapConstant(callable.code());
 
   Node* result = graph()->NewNode(jsgraph()->common()->Call(desc), stub_code,
-                                  node, context, effect, control);
+                                  node, context, *effect_, *control_);
+
+  SetSourcePosition(result, 1);
 
   *effect_ = result;
 
@@ -2495,35 +2620,30 @@
 }
 
 Node* WasmGraphBuilder::FromJS(Node* node, Node* context,
-                               wasm::LocalType type) {
+                               wasm::ValueType type) {
+  DCHECK_NE(wasm::kWasmStmt, type);
+
   // Do a JavaScript ToNumber.
-  Node* num = BuildJavaScriptToNumber(node, context, *effect_, *control_);
+  Node* num = BuildJavaScriptToNumber(node, context);
 
   // Change representation.
   SimplifiedOperatorBuilder simplified(jsgraph()->zone());
   num = BuildChangeTaggedToFloat64(num);
 
   switch (type) {
-    case wasm::kAstI32: {
+    case wasm::kWasmI32: {
       num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToWord32(),
                              num);
       break;
     }
-    case wasm::kAstS128:
-    case wasm::kAstI64:
-      // Throw a TypeError. The native context is good enough here because we
-      // only throw a TypeError.
-      return BuildCallToRuntime(Runtime::kWasmThrowTypeError, jsgraph(),
-                                jsgraph()->isolate()->native_context(), nullptr,
-                                0, effect_, *control_);
-    case wasm::kAstF32:
+    case wasm::kWasmS128:
+    case wasm::kWasmI64:
+      UNREACHABLE();
+    case wasm::kWasmF32:
       num = graph()->NewNode(jsgraph()->machine()->TruncateFloat64ToFloat32(),
                              num);
       break;
-    case wasm::kAstF64:
-      break;
-    case wasm::kAstStmt:
-      num = jsgraph()->Int32Constant(0);
+    case wasm::kWasmF64:
       break;
     default:
       UNREACHABLE();
@@ -2613,42 +2733,73 @@
   return jsgraph()->IntPtrConstant(HeapNumber::kValueOffset - kHeapObjectTag);
 }
 
+bool IsJSCompatible(wasm::ValueType type) {
+  return (type != wasm::kWasmI64) && (type != wasm::kWasmS128);
+}
+
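+// A signature is JS-compatible iff none of its parameter or return types is
+// i64 or s128: such values have no JavaScript representation, so the wrappers
+// below throw a TypeError for incompatible signatures instead of converting.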
+bool HasJSCompatibleSignature(wasm::FunctionSig* sig) {
+  for (size_t i = 0; i < sig->parameter_count(); i++) {
+    if (!IsJSCompatible(sig->GetParam(i))) {
+      return false;
+    }
+  }
+  for (size_t i = 0; i < sig->return_count(); i++) {
+    if (!IsJSCompatible(sig->GetReturn(i))) {
+      return false;
+    }
+  }
+  return true;
+}
+
 void WasmGraphBuilder::BuildJSToWasmWrapper(Handle<Code> wasm_code,
                                             wasm::FunctionSig* sig) {
   int wasm_count = static_cast<int>(sig->parameter_count());
-  int param_count;
-  if (jsgraph()->machine()->Is64()) {
-    param_count = static_cast<int>(sig->parameter_count());
-  } else {
-    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
-  }
-  int count = param_count + 3;
+  int count = wasm_count + 3;
   Node** args = Buffer(count);
 
   // Build the start and the JS parameter nodes.
-  Node* start = Start(param_count + 5);
+  Node* start = Start(wasm_count + 5);
   *control_ = start;
   *effect_ = start;
+
   // Create the context parameter
   Node* context = graph()->NewNode(
       jsgraph()->common()->Parameter(
           Linkage::GetJSCallContextParamIndex(wasm_count + 1), "%context"),
       graph()->start());
 
+  if (!HasJSCompatibleSignature(sig_)) {
+    // Throw a TypeError. Use the context of the calling JavaScript function
+    // (passed as a parameter), such that the generated code is context
+    // independent.
+    BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError, jsgraph(),
+                                  context, nullptr, 0, effect_, *control_);
+
+    // Add a dummy call to the wasm function so that the generated wrapper
+    // contains a reference to the wrapped wasm function. Without this reference
+    // the wasm function could not be re-imported into another wasm module.
+    int pos = 0;
+    args[pos++] = HeapConstant(wasm_code);
+    args[pos++] = *effect_;
+    args[pos++] = *control_;
+
+    // We only need a dummy call descriptor.
+    wasm::FunctionSig::Builder dummy_sig_builder(jsgraph()->zone(), 0, 0);
+    CallDescriptor* desc = wasm::ModuleEnv::GetWasmCallDescriptor(
+        jsgraph()->zone(), dummy_sig_builder.Build());
+    *effect_ = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
+    Return(jsgraph()->UndefinedConstant());
+    return;
+  }
+
   int pos = 0;
   args[pos++] = HeapConstant(wasm_code);
 
   // Convert JS parameters to WASM numbers.
   for (int i = 0; i < wasm_count; ++i) {
-    Node* param =
-        graph()->NewNode(jsgraph()->common()->Parameter(i + 1), start);
+    Node* param = Param(i + 1);
     Node* wasm_param = FromJS(param, context, sig->GetParam(i));
     args[pos++] = wasm_param;
-    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
-      // We make up the high word with SAR to get the proper sign extension.
-      args[pos++] = graph()->NewNode(jsgraph()->machine()->Word32Sar(),
-                                     wasm_param, jsgraph()->Int32Constant(31));
-    }
   }
 
   args[pos++] = *effect_;
@@ -2657,23 +2808,13 @@
   // Call the WASM code.
   CallDescriptor* desc =
       wasm::ModuleEnv::GetWasmCallDescriptor(jsgraph()->zone(), sig);
-  if (jsgraph()->machine()->Is32()) {
-    desc = wasm::ModuleEnv::GetI32WasmCallDescriptor(jsgraph()->zone(), desc);
-  }
-  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
-  Node* retval = call;
-  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
-      sig->GetReturn(0) == wasm::kAstI64) {
-    // The return values comes as two values, we pick the low word.
-    retval = graph()->NewNode(jsgraph()->common()->Projection(0), retval,
-                              graph()->start());
-  }
-  Node* jsval = ToJS(
-      retval, sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* ret = graph()->NewNode(jsgraph()->common()->Return(),
-                               jsgraph()->Int32Constant(0), jsval, call, start);
 
-  MergeControlToEnd(jsgraph(), ret);
+  Node* call = graph()->NewNode(jsgraph()->common()->Call(desc), count, args);
+  *effect_ = call;
+  Node* retval = call;
+  Node* jsval = ToJS(
+      retval, sig->return_count() == 0 ? wasm::kWasmStmt : sig->GetReturn());
+  Return(jsval);
 }
 
 int WasmGraphBuilder::AddParameterNodes(Node** args, int pos, int param_count,
@@ -2681,14 +2822,8 @@
   // Convert WASM numbers to JS values.
   int param_index = 0;
   for (int i = 0; i < param_count; ++i) {
-    Node* param = graph()->NewNode(
-        jsgraph()->common()->Parameter(param_index++), graph()->start());
+    Node* param = Param(param_index++);
     args[pos++] = ToJS(param, sig->GetParam(i));
-    if (jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kAstI64) {
-      // On 32 bit platforms we have to skip the high word of int64
-      // parameters.
-      param_index++;
-    }
   }
   return pos;
 }
@@ -2698,19 +2833,25 @@
   DCHECK(target->IsCallable());
 
   int wasm_count = static_cast<int>(sig->parameter_count());
-  int param_count;
-  if (jsgraph()->machine()->Is64()) {
-    param_count = wasm_count;
-  } else {
-    param_count = Int64Lowering::GetParameterCountAfterLowering(sig);
-  }
 
   // Build the start and the parameter nodes.
   Isolate* isolate = jsgraph()->isolate();
   CallDescriptor* desc;
-  Node* start = Start(param_count + 3);
+  Node* start = Start(wasm_count + 3);
   *effect_ = start;
   *control_ = start;
+
+  if (!HasJSCompatibleSignature(sig_)) {
+    // Throw a TypeError. Embedding the context is ok here, since this code is
+    // regenerated at instantiation time.
+    Node* context =
+        jsgraph()->HeapConstant(jsgraph()->isolate()->native_context());
+    Return(BuildCallToRuntimeWithContext(Runtime::kWasmThrowTypeError,
+                                         jsgraph(), context, nullptr, 0,
+                                         effect_, *control_));
+    return;
+  }
+
   Node** args = Buffer(wasm_count + 7);
 
   Node* call;
@@ -2777,24 +2918,123 @@
     call = graph()->NewNode(jsgraph()->common()->Call(desc), pos, args);
   }
 
+  *effect_ = call;
+  SetSourcePosition(call, 0);
+
   // Convert the return value back.
-  Node* ret;
-  Node* val =
-      FromJS(call, HeapConstant(isolate->native_context()),
-             sig->return_count() == 0 ? wasm::kAstStmt : sig->GetReturn());
-  Node* pop_size = jsgraph()->Int32Constant(0);
-  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
-      sig->GetReturn() == wasm::kAstI64) {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val,
-                           graph()->NewNode(jsgraph()->machine()->Word32Sar(),
-                                            val, jsgraph()->Int32Constant(31)),
-                           call, start);
-  } else {
-    ret = graph()->NewNode(jsgraph()->common()->Return(), pop_size, val, call,
-                           start);
+  Node* i32_zero = jsgraph()->Int32Constant(0);
+  Node* val = sig->return_count() == 0
+                  ? i32_zero
+                  : FromJS(call, HeapConstant(isolate->native_context()),
+                           sig->GetReturn());
+  Return(val);
+}
+
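+// Builds an entry stub, ABI-compatible with the compiled wasm function, that
+// packs the incoming arguments into a buffer and hands them, together with
+// the instance and function index, to Runtime::kWasmRunInterpreter.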
+void WasmGraphBuilder::BuildWasmInterpreterEntry(
+    uint32_t function_index, wasm::FunctionSig* sig,
+    Handle<WasmInstanceObject> instance) {
+  int wasm_count = static_cast<int>(sig->parameter_count());
+  int param_count = jsgraph()->machine()->Is64()
+                        ? wasm_count
+                        : Int64Lowering::GetParameterCountAfterLowering(sig);
+
+  // Build the start and the parameter nodes.
+  Node* start = Start(param_count + 3);
+  *effect_ = start;
+  *control_ = start;
+
+  // Compute size for the argument buffer.
+  int args_size_bytes = 0;
+  for (int i = 0; i < wasm_count; i++) {
+    args_size_bytes +=
+        RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(sig->GetParam(i)), 8);
   }
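+  // Every argument slot is rounded up to a multiple of 8 bytes, keeping
+  // i64/f64 values 8-byte aligned regardless of the platform word size.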
 
-  MergeControlToEnd(jsgraph(), ret);
+  // The return value is also passed via this buffer:
+  DCHECK_GE(wasm::kV8MaxWasmFunctionReturns, sig->return_count());
+  // TODO(wasm): Handle multi-value returns.
+  DCHECK_EQ(1, wasm::kV8MaxWasmFunctionReturns);
+  int return_size_bytes =
+      sig->return_count() == 0 ? 0 : 1 << ElementSizeLog2Of(sig->GetReturn(0));
+
+  // Get a stack slot for the arguments.
+  Node* arg_buffer = args_size_bytes == 0 && return_size_bytes == 0
+                         ? jsgraph()->IntPtrConstant(0)
+                         : graph()->NewNode(jsgraph()->machine()->StackSlot(
+                               std::max(args_size_bytes, return_size_bytes)));
+
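+  // The slot is shared: arguments are written into it before the runtime
+  // call, and the return value is read back from offset 0 afterwards, hence
+  // the std::max above.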
+  // Now store all our arguments to the buffer.
+  int param_index = 0;
+  int offset = 0;
+  for (int i = 0; i < wasm_count; i++) {
+    Node* param = Param(param_index++);
+    bool is_i64_as_two_params =
+        jsgraph()->machine()->Is32() && sig->GetParam(i) == wasm::kWasmI64;
+
+    if (is_i64_as_two_params) {
+      StoreRepresentation store_rep(wasm::kWasmI32,
+                                    WriteBarrierKind::kNoWriteBarrier);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset + kInt64LowerHalfMemoryOffset),
+                           param, *effect_, *control_);
+
+      param = Param(param_index++);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset + kInt64UpperHalfMemoryOffset),
+                           param, *effect_, *control_);
+      offset += 8;
+
+    } else {
+      MachineRepresentation param_rep = sig->GetParam(i);
+      StoreRepresentation store_rep(param_rep,
+                                    WriteBarrierKind::kNoWriteBarrier);
+      *effect_ =
+          graph()->NewNode(jsgraph()->machine()->Store(store_rep), arg_buffer,
+                           Int32Constant(offset), param, *effect_, *control_);
+      offset += RoundUpToMultipleOfPowOf2(1 << ElementSizeLog2Of(param_rep), 8);
+    }
+
+    DCHECK(IsAligned(offset, 8));
+  }
+  DCHECK_EQ(param_count, param_index);
+  DCHECK_EQ(args_size_bytes, offset);
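+  // Example layout (hypothetical signature (i64, f32) -> void on a 32-bit
+  // target): the i64 is stored as two i32 halves in bytes [0, 8), the f32 in
+  // bytes [8, 12) with padding up to 16, so offset == args_size_bytes == 16.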
+
+  // We are passing the raw arg_buffer here. To the GC and other parts, it
+  // looks like a Smi (lowest bit not set). In the runtime function, however,
+  // don't call Smi::value on it; just cast it to a byte pointer.
+  Node* parameters[] = {
+      jsgraph()->HeapConstant(instance),       // wasm instance
+      jsgraph()->SmiConstant(function_index),  // function index
+      arg_buffer,                              // argument buffer
+  };
+  BuildCallToRuntime(Runtime::kWasmRunInterpreter, jsgraph(), parameters,
+                     arraysize(parameters), effect_, *control_);
+
+  // Read back the return value.
+  if (jsgraph()->machine()->Is32() && sig->return_count() > 0 &&
+      sig->GetReturn() == wasm::kWasmI64) {
+    MachineType load_rep = wasm::WasmOpcodes::MachineTypeFor(wasm::kWasmI32);
+    Node* lower =
+        graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                         Int32Constant(0), *effect_, *control_);
+    Node* upper =
+        graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                         Int32Constant(sizeof(int32_t)), *effect_, *control_);
+    Return(upper, lower);
+  } else {
+    Node* val;
+    if (sig->return_count() == 0) {
+      val = Int32Constant(0);
+    } else {
+      MachineType load_rep =
+          wasm::WasmOpcodes::MachineTypeFor(sig->GetReturn());
+      val = graph()->NewNode(jsgraph()->machine()->Load(load_rep), arg_buffer,
+                             Int32Constant(0), *effect_, *control_);
+    }
+    Return(val);
+  }
 }
 
 Node* WasmGraphBuilder::MemBuffer(uint32_t offset) {
@@ -2853,12 +3093,18 @@
 
 void WasmGraphBuilder::EnsureFunctionTableNodes() {
   if (function_tables_.size() > 0) return;
-  for (size_t i = 0; i < module_->instance->function_tables.size(); ++i) {
-    auto handle = module_->instance->function_tables[i];
-    DCHECK(!handle.is_null());
-    function_tables_.push_back(HeapConstant(handle));
+  size_t tables_size = module_->instance->function_tables.size();
+  DCHECK(tables_size == module_->instance->signature_tables.size());
+  for (size_t i = 0; i < tables_size; ++i) {
+    auto function_handle = module_->instance->function_tables[i];
+    auto signature_handle = module_->instance->signature_tables[i];
+    DCHECK(!function_handle.is_null() && !signature_handle.is_null());
+    function_tables_.push_back(HeapConstant(function_handle));
+    signature_tables_.push_back(HeapConstant(signature_handle));
     uint32_t table_size = module_->module->function_tables[i].min_size;
-    function_table_sizes_.push_back(Uint32Constant(table_size));
+    function_table_sizes_.push_back(jsgraph()->RelocatableInt32Constant(
+        static_cast<uint32_t>(table_size),
+        RelocInfo::WASM_FUNCTION_TABLE_SIZE_REFERENCE));
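+    // The size is emitted as a relocatable constant, presumably so it can be
+    // patched later, e.g. when this code is reused for an instance whose
+    // table has grown.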
   }
 }
 
@@ -2895,6 +3141,7 @@
                                       uint32_t offset,
                                       wasm::WasmCodePosition position) {
   DCHECK(module_ && module_->instance);
+  if (FLAG_wasm_no_bounds_checks) return;
   uint32_t size = module_->instance->mem_size;
   byte memsize = wasm::WasmOpcodes::MemSize(memtype);
 
@@ -2945,15 +3192,14 @@
   trap_->AddTrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
 }
 
-
-Node* WasmGraphBuilder::LoadMem(wasm::LocalType type, MachineType memtype,
+Node* WasmGraphBuilder::LoadMem(wasm::ValueType type, MachineType memtype,
                                 Node* index, uint32_t offset,
                                 uint32_t alignment,
                                 wasm::WasmCodePosition position) {
   Node* load;
 
   // WASM semantics throw on OOB. Introduce explicit bounds check.
-  if (!FLAG_wasm_trap_handler) {
+  if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
     BoundsCheckMem(memtype, index, offset, position);
   }
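+  // With trap handler support, the explicit check is elided and out-of-bounds
+  // accesses fault into the signal handler via the ProtectedLoad below.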
   bool aligned = static_cast<int>(alignment) >=
@@ -2961,18 +3207,19 @@
 
   if (aligned ||
       jsgraph()->machine()->UnalignedLoadSupported(memtype, alignment)) {
-    if (FLAG_wasm_trap_handler) {
-      Node* context = HeapConstant(module_->instance->context);
+    if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+      DCHECK(FLAG_wasm_guard_pages);
       Node* position_node = jsgraph()->Int32Constant(position);
       load = graph()->NewNode(jsgraph()->machine()->ProtectedLoad(memtype),
-                              MemBuffer(offset), index, context, position_node,
-                              *effect_, *control_);
+                              MemBuffer(offset), index, position_node, *effect_,
+                              *control_);
     } else {
       load = graph()->NewNode(jsgraph()->machine()->Load(memtype),
                               MemBuffer(offset), index, *effect_, *control_);
     }
   } else {
-    DCHECK(!FLAG_wasm_trap_handler);
+    // TODO(eholk): Support unaligned loads with trap handlers.
+    DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
     load = graph()->NewNode(jsgraph()->machine()->UnalignedLoad(memtype),
                             MemBuffer(offset), index, *effect_, *control_);
   }
@@ -2983,7 +3230,7 @@
   load = BuildChangeEndianness(load, memtype, type);
 #endif
 
-  if (type == wasm::kAstI64 &&
+  if (type == wasm::kWasmI64 &&
       ElementSizeLog2Of(memtype.representation()) < 3) {
     // TODO(titzer): TF zeroes the upper bits of 64-bit loads for subword sizes.
     if (memtype.IsSigned()) {
@@ -3006,7 +3253,9 @@
   Node* store;
 
   // WASM semantics throw on OOB. Introduce explicit bounds check.
-  BoundsCheckMem(memtype, index, offset, position);
+  if (!FLAG_wasm_trap_handler || !kTrapHandlerSupported) {
+    BoundsCheckMem(memtype, index, offset, position);
+  }
   StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
 
   bool aligned = static_cast<int>(alignment) >=
@@ -3018,11 +3267,20 @@
 
   if (aligned ||
       jsgraph()->machine()->UnalignedStoreSupported(memtype, alignment)) {
-    StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
-    store =
-        graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
-                         index, val, *effect_, *control_);
+    if (FLAG_wasm_trap_handler && kTrapHandlerSupported) {
+      Node* position_node = jsgraph()->Int32Constant(position);
+      store = graph()->NewNode(
+          jsgraph()->machine()->ProtectedStore(memtype.representation()),
+          MemBuffer(offset), index, val, position_node, *effect_, *control_);
+    } else {
+      StoreRepresentation rep(memtype.representation(), kNoWriteBarrier);
+      store =
+          graph()->NewNode(jsgraph()->machine()->Store(rep), MemBuffer(offset),
+                           index, val, *effect_, *control_);
+    }
   } else {
+    // TODO(eholk): Support unaligned stores with trap handlers.
+    DCHECK(!FLAG_wasm_trap_handler || !kTrapHandlerSupported);
     UnalignedStoreRepresentation rep(memtype.representation());
     store =
         graph()->NewNode(jsgraph()->machine()->UnalignedStore(rep),
@@ -3070,16 +3328,14 @@
 void WasmGraphBuilder::Int64LoweringForTesting() {
   if (jsgraph()->machine()->Is32()) {
     Int64Lowering r(jsgraph()->graph(), jsgraph()->machine(),
-                    jsgraph()->common(), jsgraph()->zone(),
-                    function_signature_);
+                    jsgraph()->common(), jsgraph()->zone(), sig_);
     r.LowerGraph();
   }
 }
 
 void WasmGraphBuilder::SimdScalarLoweringForTesting() {
   SimdScalarLowering(jsgraph()->graph(), jsgraph()->machine(),
-                     jsgraph()->common(), jsgraph()->zone(),
-                     function_signature_)
+                     jsgraph()->common(), jsgraph()->zone(), sig_)
       .LowerGraph();
 }
 
@@ -3093,6 +3349,7 @@
 Node* WasmGraphBuilder::CreateS128Value(int32_t value) {
   // TODO(gdeepti): Introduce Simd128Constant to common-operator.h and use it
   // instead of creating a SIMD Value.
+  has_simd_ = true;
   return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(),
                           Int32Constant(value), Int32Constant(value),
                           Int32Constant(value), Int32Constant(value));
@@ -3100,36 +3357,348 @@
 
 Node* WasmGraphBuilder::SimdOp(wasm::WasmOpcode opcode,
                                const NodeVector& inputs) {
+  has_simd_ = true;
   switch (opcode) {
-    case wasm::kExprI32x4Splat:
-      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
-                              inputs[0], inputs[0], inputs[0]);
-    case wasm::kExprI32x4Add:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
-                              inputs[1]);
-    case wasm::kExprF32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
-                              inputs[0], inputs[1]);
     case wasm::kExprF32x4Splat:
       return graph()->NewNode(jsgraph()->machine()->CreateFloat32x4(),
                               inputs[0], inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprF32x4SConvertI32x4:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4FromInt32x4(),
+                              inputs[0]);
+    case wasm::kExprF32x4UConvertI32x4:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4FromUint32x4(),
+                              inputs[0]);
+    case wasm::kExprF32x4Abs:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Abs(), inputs[0]);
+    case wasm::kExprF32x4Neg:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Neg(), inputs[0]);
     case wasm::kExprF32x4Add:
       return graph()->NewNode(jsgraph()->machine()->Float32x4Add(), inputs[0],
                               inputs[1]);
+    case wasm::kExprF32x4Sub:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF32x4Eq:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprF32x4Ne:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt32x4(), inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI32x4SConvertF32x4:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4FromFloat32x4(),
+                              inputs[0]);
+    case wasm::kExprI32x4UConvertF32x4:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4FromFloat32x4(),
+                              inputs[0]);
+    case wasm::kExprI32x4Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Neg(), inputs[0]);
+    case wasm::kExprI32x4Add:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4LtS:
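+      // There are only GreaterThan(OrEqual) machine operators; the Lt/Le
+      // variants here and below swap the operands instead.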
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI32x4LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI32x4LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI32x4GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint32x4GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprI16x8Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt16x8(), inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI16x8Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Neg(), inputs[0]);
+    case wasm::kExprI16x8Add:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8AddSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8SubSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8LtS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8AddSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8SubSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI16x8LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI16x8LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI16x8GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint16x8GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprI8x16Splat:
+      return graph()->NewNode(jsgraph()->machine()->CreateInt8x16(), inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0], inputs[0],
+                              inputs[0], inputs[0], inputs[0]);
+    case wasm::kExprI8x16Neg:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Neg(), inputs[0]);
+    case wasm::kExprI8x16Add:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Add(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16AddSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16Sub:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Sub(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16SubSaturateS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16Mul:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Mul(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MinS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MaxS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16Eq:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16Equal(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16Ne:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16NotEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16LtS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16LeS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16GtS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16GeS:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16GreaterThanOrEqual(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16AddSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16AddSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16SubSaturateU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16SubSaturate(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16MinU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16Min(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16MaxU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16Max(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprI8x16LtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+                              inputs[1], inputs[0]);
+    case wasm::kExprI8x16LeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[1],
+          inputs[0]);
+    case wasm::kExprI8x16GtU:
+      return graph()->NewNode(jsgraph()->machine()->Uint8x16GreaterThan(),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16GeU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16GreaterThanOrEqual(), inputs[0],
+          inputs[1]);
+    case wasm::kExprS32x4Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd32x4Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS16x8Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd16x8Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS8x16Select:
+      return graph()->NewNode(jsgraph()->machine()->Simd8x16Select(), inputs[0],
+                              inputs[1], inputs[2]);
+    case wasm::kExprS128And:
+      return graph()->NewNode(jsgraph()->machine()->Simd128And(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Or:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Or(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Xor:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Xor(), inputs[0],
+                              inputs[1]);
+    case wasm::kExprS128Not:
+      return graph()->NewNode(jsgraph()->machine()->Simd128Not(), inputs[0]);
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
 }
 
-Node* WasmGraphBuilder::SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane,
-                                        Node* input) {
+Node* WasmGraphBuilder::SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+                                   const NodeVector& inputs) {
+  has_simd_ = true;
   switch (opcode) {
-    case wasm::kExprI32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(), input,
-                              Int32Constant(lane));
     case wasm::kExprF32x4ExtractLane:
-      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(),
-                              input, Int32Constant(lane));
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprF32x4ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Float32x4ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI32x4ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI32x4ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int32x4ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI16x8ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI16x8ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int16x8ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    case wasm::kExprI8x16ExtractLane:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16ExtractLane(lane),
+                              inputs[0]);
+    case wasm::kExprI8x16ReplaceLane:
+      return graph()->NewNode(jsgraph()->machine()->Int8x16ReplaceLane(lane),
+                              inputs[0], inputs[1]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+  }
+}
+
+Node* WasmGraphBuilder::SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+                                    const NodeVector& inputs) {
+  has_simd_ = true;
+  switch (opcode) {
+    case wasm::kExprI32x4Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int32x4ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI32x4ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int32x4ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI32x4ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint32x4ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int16x8ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int16x8ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI16x8ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint16x8ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16Shl:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int8x16ShiftLeftByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16ShrS:
+      return graph()->NewNode(
+          jsgraph()->machine()->Int8x16ShiftRightByScalar(shift), inputs[0]);
+    case wasm::kExprI8x16ShrU:
+      return graph()->NewNode(
+          jsgraph()->machine()->Uint8x16ShiftRightByScalar(shift), inputs[0]);
+    default:
+      return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
+  }
+}
+
+Node* WasmGraphBuilder::SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+                                      const NodeVector& inputs) {
+  has_simd_ = true;
+  switch (opcode) {
+    case wasm::kExprS32x4Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd32x4Swizzle(swizzle),
+                              inputs[0]);
+    case wasm::kExprS16x8Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd16x8Swizzle(swizzle),
+                              inputs[0]);
+    case wasm::kExprS8x16Swizzle:
+      return graph()->NewNode(jsgraph()->machine()->Simd8x16Swizzle(swizzle),
+                              inputs[0]);
     default:
       return graph()->NewNode(UnsupportedOpcode(opcode), nullptr);
   }
@@ -3156,9 +3725,10 @@
                                    *script_str, 0, 0));
 }
 
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+                                    const wasm::WasmModule* module,
                                     Handle<Code> wasm_code, uint32_t index) {
-  const wasm::WasmFunction* func = &module->module->functions[index];
+  const wasm::WasmFunction* func = &module->functions[index];
 
   //----------------------------------------------------------------------------
   // Create the Graph
@@ -3172,10 +3742,10 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, func->sig);
+  wasm::ModuleEnv module_env(module, nullptr);
+  WasmGraphBuilder builder(&module_env, &zone, &jsgraph, func->sig);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
-  builder.set_module(module);
   builder.BuildJSToWasmWrapper(wasm_code, func->sig);
 
   //----------------------------------------------------------------------------
@@ -3188,8 +3758,8 @@
   }
 
   // Schedule and compile to machine code.
-  int params =
-      static_cast<int>(module->GetFunctionSignature(index)->parameter_count());
+  int params = static_cast<int>(
+      module_env.GetFunctionSignature(index)->parameter_count());
   CallDescriptor* incoming = Linkage::GetJSCallDescriptor(
       &zone, false, params + 1, CallDescriptor::kNoFlags);
   Code::Flags flags = Code::ComputeFlags(Code::JS_TO_WASM_FUNCTION);
@@ -3222,10 +3792,11 @@
   }
 
   if (isolate->logger()->is_logging_code_events() || isolate->is_profiling()) {
-    RecordFunctionCompilation(
-        CodeEventListener::FUNCTION_TAG, isolate, code, "js-to-wasm", index,
-        wasm::WasmName("export"),
-        module->module->GetName(func->name_offset, func->name_length));
+    char func_name[32];
+    SNPrintF(ArrayVector(func_name), "js-to-wasm#%d", func->func_index);
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+                              "js-to-wasm", index, wasm::WasmName("export"),
+                              CStrVector(func_name));
   }
   return code;
 }
@@ -3233,7 +3804,8 @@
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
                                     Handle<String> module_name,
-                                    MaybeHandle<String> import_name) {
+                                    MaybeHandle<String> import_name,
+                                    wasm::ModuleOrigin origin) {
   //----------------------------------------------------------------------------
   // Create the Graph
   //----------------------------------------------------------------------------
@@ -3246,7 +3818,12 @@
   Node* control = nullptr;
   Node* effect = nullptr;
 
-  WasmGraphBuilder builder(&zone, &jsgraph, sig);
+  SourcePositionTable* source_position_table =
+      origin == wasm::kAsmJsOrigin ? new (&zone) SourcePositionTable(&graph)
+                                   : nullptr;
+
+  WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig,
+                           source_position_table);
   builder.set_control_ptr(&control);
   builder.set_effect_ptr(&effect);
   builder.BuildWasmToJSWrapper(target, sig);
@@ -3282,7 +3859,8 @@
     }
 
     CompilationInfo info(func_name, isolate, &zone, flags);
-    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr,
+                                            source_position_table);
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_opt_code && !code.is_null()) {
       OFStream os(stdout);
@@ -3310,6 +3888,75 @@
   return code;
 }
 
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+                                         wasm::FunctionSig* sig,
+                                         Handle<WasmInstanceObject> instance) {
+  //----------------------------------------------------------------------------
+  // Create the Graph
+  //----------------------------------------------------------------------------
+  Zone zone(isolate->allocator(), ZONE_NAME);
+  Graph graph(&zone);
+  CommonOperatorBuilder common(&zone);
+  MachineOperatorBuilder machine(
+      &zone, MachineType::PointerRepresentation(),
+      InstructionSelector::SupportedMachineOperatorFlags(),
+      InstructionSelector::AlignmentRequirements());
+  JSGraph jsgraph(isolate, &graph, &common, nullptr, nullptr, &machine);
+
+  Node* control = nullptr;
+  Node* effect = nullptr;
+
+  WasmGraphBuilder builder(nullptr, &zone, &jsgraph, sig);
+  builder.set_control_ptr(&control);
+  builder.set_effect_ptr(&effect);
+  builder.BuildWasmInterpreterEntry(func_index, sig, instance);
+
+  Handle<Code> code = Handle<Code>::null();
+  {
+    if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+      OFStream os(stdout);
+      os << "-- Wasm to interpreter graph -- " << std::endl;
+      os << AsRPO(graph);
+    }
+
+    // Schedule and compile to machine code.
+    CallDescriptor* incoming =
+        wasm::ModuleEnv::GetWasmCallDescriptor(&zone, sig);
+    if (machine.Is32()) {
+      incoming = wasm::ModuleEnv::GetI32WasmCallDescriptor(&zone, incoming);
+    }
+    Code::Flags flags = Code::ComputeFlags(Code::WASM_INTERPRETER_ENTRY);
+    EmbeddedVector<char, 32> debug_name;
+    int name_len = SNPrintF(debug_name, "wasm-to-interpreter#%d", func_index);
+    DCHECK(name_len > 0 && name_len < debug_name.length());
+    debug_name.Truncate(name_len);
+    DCHECK_EQ('\0', debug_name.start()[debug_name.length()]);
+
+    CompilationInfo info(debug_name, isolate, &zone, flags);
+    code = Pipeline::GenerateCodeForTesting(&info, incoming, &graph, nullptr);
+#ifdef ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code && !code.is_null()) {
+      OFStream os(stdout);
+      code->Disassemble(debug_name.start(), os);
+    }
+#endif
+
+    if (isolate->logger()->is_logging_code_events() ||
+        isolate->is_profiling()) {
+      RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate, code,
+                                "wasm-to-interpreter", func_index,
+                                wasm::WasmName("module"), debug_name);
+    }
+  }
+
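+  // Store a weak link back to the instance in the code's deoptimization data.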
+  Handle<FixedArray> deopt_data = isolate->factory()->NewFixedArray(1, TENURED);
+  Handle<WeakCell> weak_instance = isolate->factory()->NewWeakCell(instance);
+  deopt_data->set(0, *weak_instance);
+  code->set_deoptimization_data(*deopt_data);
+
+  return code;
+}
+
 SourcePositionTable* WasmCompilationUnit::BuildGraphForWasmFunction(
     double* decode_ms) {
   base::ElapsedTimer decode_timer;
@@ -3323,12 +3970,12 @@
   MachineOperatorBuilder* machine = jsgraph_->machine();
   SourcePositionTable* source_position_table =
       new (jsgraph_->zone()) SourcePositionTable(graph);
-  WasmGraphBuilder builder(jsgraph_->zone(), jsgraph_, function_->sig,
-                           source_position_table);
-  wasm::FunctionBody body = {
-      module_env_, function_->sig, module_env_->module->module_start,
-      module_env_->module->module_start + function_->code_start_offset,
-      module_env_->module->module_start + function_->code_end_offset};
+  WasmGraphBuilder builder(&module_env_->module_env, jsgraph_->zone(), jsgraph_,
+                           function_->sig, source_position_table);
+  const byte* module_start = module_env_->wire_bytes.start();
+  wasm::FunctionBody body = {function_->sig, module_start,
+                             module_start + function_->code_start_offset,
+                             module_start + function_->code_end_offset};
   graph_construction_result_ =
       wasm::BuildTFGraph(isolate_->allocator(), &builder, body);
 
@@ -3341,18 +3988,26 @@
   }
 
   if (machine->Is32()) {
-    Int64Lowering r(graph, machine, common, jsgraph_->zone(), function_->sig);
-    r.LowerGraph();
+    Int64Lowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+        .LowerGraph();
   }
 
-  SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
-      .LowerGraph();
+  if (builder.has_simd() && !CpuFeatures::SupportsSimd128()) {
+    SimdScalarLowering(graph, machine, common, jsgraph_->zone(), function_->sig)
+        .LowerGraph();
+  }
 
   int index = static_cast<int>(function_->func_index);
 
   if (index >= FLAG_trace_wasm_ast_start && index < FLAG_trace_wasm_ast_end) {
     OFStream os(stdout);
-    PrintAst(isolate_->allocator(), body, os, nullptr);
+    PrintRawWasmCode(isolate_->allocator(), body,
+                     module_env_->module_env.module);
+  }
+  if (index >= FLAG_trace_wasm_text_start && index < FLAG_trace_wasm_text_end) {
+    OFStream os(stdout);
+    PrintWasmText(module_env_->module_env.module, module_env_->wire_bytes,
+                  function_->func_index, os, nullptr);
   }
   if (FLAG_trace_wasm_decode_time) {
     *decode_ms = decode_timer.Elapsed().InMillisecondsF();
@@ -3360,15 +4015,22 @@
   return source_position_table;
 }
 
+char* WasmCompilationUnit::GetTaggedFunctionName(
+    const wasm::WasmFunction* function) {
+  snprintf(function_name_, sizeof(function_name_), "wasm#%d",
+           function->func_index);
+  return function_name_;
+}
+
 WasmCompilationUnit::WasmCompilationUnit(wasm::ErrorThrower* thrower,
                                          Isolate* isolate,
-                                         wasm::ModuleEnv* module_env,
+                                         wasm::ModuleBytesEnv* module_env,
                                          const wasm::WasmFunction* function,
                                          uint32_t index)
     : thrower_(thrower),
       isolate_(isolate),
       module_env_(module_env),
-      function_(function),
+      function_(&module_env->module_env.module->functions[index]),
       graph_zone_(new Zone(isolate->allocator(), ZONE_NAME)),
       jsgraph_(new (graph_zone()) JSGraph(
           isolate, new (graph_zone()) Graph(graph_zone()),
@@ -3379,14 +4041,14 @@
                        InstructionSelector::AlignmentRequirements()))),
       compilation_zone_(isolate->allocator(), ZONE_NAME),
       info_(function->name_length != 0
-                ? module_env->module->GetNameOrNull(function->name_offset,
-                                                    function->name_length)
-                : ArrayVector("wasm"),
+                ? module_env->wire_bytes.GetNameOrNull(function)
+                : CStrVector(GetTaggedFunctionName(function)),
             isolate, &compilation_zone_,
             Code::ComputeFlags(Code::WASM_FUNCTION)),
       job_(),
       index_(index),
-      ok_(true) {
+      ok_(true),
+      protected_instructions_(&compilation_zone_) {
   // Create and cache this node in the main thread.
   jsgraph_->CEntryStubConstant(1);
 }
@@ -3398,7 +4060,9 @@
   if (FLAG_trace_wasm_compiler) {
     OFStream os(stdout);
     os << "Compiling WASM function "
-       << wasm::WasmFunctionName(function_, module_env_) << std::endl;
+       << wasm::WasmFunctionName(
+              function_, module_env_->wire_bytes.GetNameOrNull(function_))
+       << std::endl;
     os << std::endl;
   }
 
@@ -3423,11 +4087,12 @@
   CallDescriptor* descriptor = wasm::ModuleEnv::GetWasmCallDescriptor(
       &compilation_zone_, function_->sig);
   if (jsgraph_->machine()->Is32()) {
-    descriptor =
-        module_env_->GetI32WasmCallDescriptor(&compilation_zone_, descriptor);
+    descriptor = module_env_->module_env.GetI32WasmCallDescriptor(
+        &compilation_zone_, descriptor);
   }
-  job_.reset(Pipeline::NewWasmCompilationJob(&info_, jsgraph_->graph(),
-                                             descriptor, source_positions));
+  job_.reset(Pipeline::NewWasmCompilationJob(
+      &info_, jsgraph_, descriptor, source_positions, &protected_instructions_,
+      module_env_->module_env.module->origin != wasm::kWasmOrigin));
   ok_ = job_->ExecuteJob() == CompilationJob::SUCCEEDED;
   // TODO(bradnelson): Improve histogram handling of size_t.
   // TODO(ahaas): The counters are not thread-safe at the moment.
@@ -3438,11 +4103,10 @@
   if (FLAG_trace_wasm_decode_time) {
     double pipeline_ms = pipeline_timer.Elapsed().InMillisecondsF();
     PrintF(
-        "wasm-compilation phase 1 ok: %d bytes, %0.3f ms decode, %zu nodes, "
+        "wasm-compilation phase 1 ok: %u bytes, %0.3f ms decode, %zu nodes, "
         "%0.3f ms pipeline\n",
-        static_cast<int>(function_->code_end_offset -
-                         function_->code_start_offset),
-        decode_ms, node_count, pipeline_ms);
+        function_->code_end_offset - function_->code_start_offset, decode_ms,
+        node_count, pipeline_ms);
   }
 }
 
@@ -3451,8 +4115,7 @@
     if (graph_construction_result_.failed()) {
       // Add the function as another context for the exception
       ScopedVector<char> buffer(128);
-      wasm::WasmName name = module_env_->module->GetName(
-          function_->name_offset, function_->name_length);
+      wasm::WasmName name = module_env_->wire_bytes.GetName(function_);
       SNPrintF(buffer, "Compiling WASM function #%d:%.*s failed:",
                function_->func_index, name.length(), name.start());
       thrower_->CompileFailed(buffer.start(), graph_construction_result_);
@@ -3460,31 +4123,29 @@
 
     return Handle<Code>::null();
   }
+  base::ElapsedTimer codegen_timer;
+  if (FLAG_trace_wasm_decode_time) {
+    codegen_timer.Start();
+  }
   if (job_->FinalizeJob() != CompilationJob::SUCCEEDED) {
     return Handle<Code>::null();
   }
-  base::ElapsedTimer compile_timer;
-  if (FLAG_trace_wasm_decode_time) {
-    compile_timer.Start();
-  }
   Handle<Code> code = info_.code();
   DCHECK(!code.is_null());
 
   if (isolate_->logger()->is_logging_code_events() ||
       isolate_->is_profiling()) {
-    RecordFunctionCompilation(
-        CodeEventListener::FUNCTION_TAG, isolate_, code, "WASM_function",
-        function_->func_index, wasm::WasmName("module"),
-        module_env_->module->GetName(function_->name_offset,
-                                     function_->name_length));
+    RecordFunctionCompilation(CodeEventListener::FUNCTION_TAG, isolate_, code,
+                              "WASM_function", function_->func_index,
+                              wasm::WasmName("module"),
+                              module_env_->wire_bytes.GetName(function_));
   }
 
   if (FLAG_trace_wasm_decode_time) {
-    double compile_ms = compile_timer.Elapsed().InMillisecondsF();
-    PrintF("wasm-code-generation ok: %d bytes, %0.3f ms code generation\n",
-           static_cast<int>(function_->code_end_offset -
-                            function_->code_start_offset),
-           compile_ms);
+    double codegen_ms = codegen_timer.Elapsed().InMillisecondsF();
+    PrintF("wasm-code-generation ok: %u bytes, %0.3f ms code generation\n",
+           function_->code_end_offset - function_->code_start_offset,
+           codegen_ms);
   }
 
   return code;
diff --git a/src/compiler/wasm-compiler.h b/src/compiler/wasm-compiler.h
index b4bc350..706c386 100644
--- a/src/compiler/wasm-compiler.h
+++ b/src/compiler/wasm-compiler.h
@@ -11,6 +11,8 @@
 // Do not include anything from src/compiler here!
 #include "src/compilation-info.h"
 #include "src/compiler.h"
+#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/wasm-module.h"
 #include "src/wasm/wasm-opcodes.h"
 #include "src/wasm/wasm-result.h"
 #include "src/zone/zone.h"
@@ -29,8 +31,10 @@
 
 namespace wasm {
 // Forward declarations for some WASM data structures.
+struct ModuleBytesEnv;
 struct ModuleEnv;
 struct WasmFunction;
+struct WasmModule;
 class ErrorThrower;
 struct DecodeStruct;
 
@@ -43,7 +47,7 @@
 class WasmCompilationUnit final {
  public:
   WasmCompilationUnit(wasm::ErrorThrower* thrower, Isolate* isolate,
-                      wasm::ModuleEnv* module_env,
+                      wasm::ModuleBytesEnv* module_env,
                       const wasm::WasmFunction* function, uint32_t index);
 
   Zone* graph_zone() { return graph_zone_.get(); }
@@ -54,20 +58,24 @@
 
   static Handle<Code> CompileWasmFunction(wasm::ErrorThrower* thrower,
                                           Isolate* isolate,
-                                          wasm::ModuleEnv* module_env,
+                                          wasm::ModuleBytesEnv* module_env,
                                           const wasm::WasmFunction* function) {
-    WasmCompilationUnit unit(thrower, isolate, module_env, function, 0);
+    WasmCompilationUnit unit(thrower, isolate, module_env, function,
+                             function->func_index);
     unit.ExecuteCompilation();
     return unit.FinishCompilation();
   }
 
  private:
   SourcePositionTable* BuildGraphForWasmFunction(double* decode_ms);
+  char* GetTaggedFunctionName(const wasm::WasmFunction* function);
 
   wasm::ErrorThrower* thrower_;
   Isolate* isolate_;
-  wasm::ModuleEnv* module_env_;
+  wasm::ModuleBytesEnv* module_env_;
   const wasm::WasmFunction* function_;
+  // The function name, tagged with its uint32 func_index: "wasm#<func_index>".
+  char function_name_[16];
   // The graph zone is deallocated at the end of ExecuteCompilation.
   std::unique_ptr<Zone> graph_zone_;
   JSGraph* jsgraph_;
@@ -77,6 +85,9 @@
   uint32_t index_;
   wasm::Result<wasm::DecodeStruct*> graph_construction_result_;
   bool ok_;
+  ZoneVector<trap_handler::ProtectedInstructionData>
+      protected_instructions_;  // Instructions that are protected by the signal
+                                // handler.
 
   DISALLOW_COPY_AND_ASSIGN(WasmCompilationUnit);
 };
@@ -85,12 +96,20 @@
 Handle<Code> CompileWasmToJSWrapper(Isolate* isolate, Handle<JSReceiver> target,
                                     wasm::FunctionSig* sig, uint32_t index,
                                     Handle<String> module_name,
-                                    MaybeHandle<String> import_name);
+                                    MaybeHandle<String> import_name,
+                                    wasm::ModuleOrigin origin);
 
 // Wraps a given wasm code object, producing a JS-callable code object.
-Handle<Code> CompileJSToWasmWrapper(Isolate* isolate, wasm::ModuleEnv* module,
+Handle<Code> CompileJSToWasmWrapper(Isolate* isolate,
+                                    const wasm::WasmModule* module,
                                     Handle<Code> wasm_code, uint32_t index);
 
+// Compiles a stub that redirects a call to a wasm function to the wasm
+// interpreter. It's ABI compatible with the compiled wasm function.
+Handle<Code> CompileWasmInterpreterEntry(Isolate* isolate, uint32_t func_index,
+                                         wasm::FunctionSig* sig,
+                                         Handle<WasmInstanceObject> instance);
+
 // Abstracts details of building TurboFan graph nodes for WASM to separate
 // the WASM decoder from the internal details of TurboFan.
 class WasmTrapHelper;
@@ -98,7 +117,7 @@
 class WasmGraphBuilder {
  public:
   WasmGraphBuilder(
-      Zone* z, JSGraph* g, wasm::FunctionSig* function_signature,
+      wasm::ModuleEnv* module_env, Zone* z, JSGraph* g, wasm::FunctionSig* sig,
       compiler::SourcePositionTable* source_position_table = nullptr);
 
   Node** Buffer(size_t count) {
@@ -116,11 +135,11 @@
   //-----------------------------------------------------------------------
   Node* Error();
   Node* Start(unsigned params);
-  Node* Param(unsigned index, wasm::LocalType type);
+  Node* Param(unsigned index);
   Node* Loop(Node* entry);
   Node* Terminate(Node* effect, Node* control);
   Node* Merge(unsigned count, Node** controls);
-  Node* Phi(wasm::LocalType type, unsigned count, Node** vals, Node* control);
+  Node* Phi(wasm::ValueType type, unsigned count, Node** vals, Node* control);
   Node* EffectPhi(unsigned count, Node** effects, Node* control);
   Node* NumberConstant(int32_t value);
   Node* Uint32Constant(uint32_t value);
@@ -155,7 +174,12 @@
   Node* Switch(unsigned count, Node* key);
   Node* IfValue(int32_t value, Node* sw);
   Node* IfDefault(Node* sw);
-  Node* Return(unsigned count, Node** vals);
+  Node* Return(unsigned count, Node** nodes);
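+  // Convenience overload that collects the given nodes into a stack array,
+  // e.g. Return(jsval) or Return(upper, lower).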
+  template <typename... Nodes>
+  Node* Return(Node* fst, Nodes*... more) {
+    Node* arr[] = {fst, more...};
+    return Return(arraysize(arr), arr);
+  }
   Node* ReturnVoid();
   Node* Unreachable(wasm::WasmCodePosition position);
 
@@ -166,9 +190,11 @@
 
   void BuildJSToWasmWrapper(Handle<Code> wasm_code, wasm::FunctionSig* sig);
   void BuildWasmToJSWrapper(Handle<JSReceiver> target, wasm::FunctionSig* sig);
+  void BuildWasmInterpreterEntry(uint32_t func_index, wasm::FunctionSig* sig,
+                                 Handle<WasmInstanceObject> instance);
 
-  Node* ToJS(Node* node, wasm::LocalType type);
-  Node* FromJS(Node* node, Node* context, wasm::LocalType type);
+  Node* ToJS(Node* node, wasm::ValueType type);
+  Node* FromJS(Node* node, Node* context, wasm::ValueType type);
   Node* Invert(Node* node);
   void EnsureFunctionTableNodes();
 
@@ -178,7 +204,7 @@
   Node* CurrentMemoryPages();
   Node* GetGlobal(uint32_t index);
   Node* SetGlobal(uint32_t index, Node* val);
-  Node* LoadMem(wasm::LocalType type, MachineType memtype, Node* index,
+  Node* LoadMem(wasm::ValueType type, MachineType memtype, Node* index,
                 uint32_t offset, uint32_t alignment,
                 wasm::WasmCodePosition position);
   Node* StoreMem(MachineType type, Node* index, uint32_t offset,
@@ -190,13 +216,11 @@
   Node* Control() { return *control_; }
   Node* Effect() { return *effect_; }
 
-  void set_module(wasm::ModuleEnv* module) { this->module_ = module; }
-
   void set_control_ptr(Node** control) { this->control_ = control; }
 
   void set_effect_ptr(Node** effect) { this->effect_ = effect; }
 
-  wasm::FunctionSig* GetFunctionSignature() { return function_signature_; }
+  wasm::FunctionSig* GetFunctionSignature() { return sig_; }
 
   void Int64LoweringForTesting();
 
@@ -207,7 +231,19 @@
   Node* CreateS128Value(int32_t value);
 
   Node* SimdOp(wasm::WasmOpcode opcode, const NodeVector& inputs);
-  Node* SimdExtractLane(wasm::WasmOpcode opcode, uint8_t lane, Node* input);
+
+  Node* SimdLaneOp(wasm::WasmOpcode opcode, uint8_t lane,
+                   const NodeVector& inputs);
+
+  Node* SimdShiftOp(wasm::WasmOpcode opcode, uint8_t shift,
+                    const NodeVector& inputs);
+
+  Node* SimdSwizzleOp(wasm::WasmOpcode opcode, uint32_t swizzle,
+                      const NodeVector& inputs);
+
+  bool has_simd() const { return has_simd_; }
+
+  wasm::ModuleEnv* module_env() const { return module_; }
 
  private:
   static const int kDefaultBufferSize = 16;
@@ -215,19 +251,21 @@
 
   Zone* zone_;
   JSGraph* jsgraph_;
-  wasm::ModuleEnv* module_;
-  Node* mem_buffer_;
-  Node* mem_size_;
+  wasm::ModuleEnv* module_ = nullptr;
+  Node* mem_buffer_ = nullptr;
+  Node* mem_size_ = nullptr;
+  NodeVector signature_tables_;
   NodeVector function_tables_;
   NodeVector function_table_sizes_;
-  Node** control_;
-  Node** effect_;
+  Node** control_ = nullptr;
+  Node** effect_ = nullptr;
   Node** cur_buffer_;
   size_t cur_bufsize_;
   Node* def_buffer_[kDefaultBufferSize];
+  bool has_simd_ = false;
 
   WasmTrapHelper* trap_;
-  wasm::FunctionSig* function_signature_;
+  wasm::FunctionSig* sig_;
   SetOncePointer<const Operator> allocate_heap_number_operator_;
 
   compiler::SourcePositionTable* source_position_table_ = nullptr;
@@ -243,7 +281,7 @@
                       wasm::WasmCodePosition position);
 
   Node* BuildChangeEndianness(Node* node, MachineType type,
-                              wasm::LocalType wasmtype = wasm::kAstStmt);
+                              wasm::ValueType wasmtype = wasm::kWasmStmt);
 
   Node* MaskShiftCount32(Node* node);
   Node* MaskShiftCount64(Node* node);
@@ -314,8 +352,7 @@
                        MachineType result_type, int trap_zero,
                        wasm::WasmCodePosition position);
 
-  Node* BuildJavaScriptToNumber(Node* node, Node* context, Node* effect,
-                                Node* control);
+  Node* BuildJavaScriptToNumber(Node* node, Node* context);
 
   Node* BuildChangeInt32ToTagged(Node* value);
   Node* BuildChangeFloat64ToTagged(Node* value);
diff --git a/src/compiler/wasm-linkage.cc b/src/compiler/wasm-linkage.cc
index a41c93c..01c1b86 100644
--- a/src/compiler/wasm-linkage.cc
+++ b/src/compiler/wasm-linkage.cc
@@ -5,6 +5,7 @@
 #include "src/assembler.h"
 #include "src/base/lazy-instance.h"
 #include "src/macro-assembler.h"
+#include "src/objects-inl.h"
 #include "src/register-configuration.h"
 
 #include "src/wasm/wasm-module.h"
@@ -24,17 +25,17 @@
 
 namespace {
 
-MachineType MachineTypeFor(LocalType type) {
+MachineType MachineTypeFor(ValueType type) {
   switch (type) {
-    case kAstI32:
+    case kWasmI32:
       return MachineType::Int32();
-    case kAstI64:
+    case kWasmI64:
       return MachineType::Int64();
-    case kAstF64:
+    case kWasmF64:
       return MachineType::Float64();
-    case kAstF32:
+    case kWasmF32:
       return MachineType::Float32();
-    case kAstS128:
+    case kWasmS128:
       return MachineType::Simd128();
     default:
       UNREACHABLE();
@@ -173,7 +174,7 @@
 
   int stack_offset;
 
-  LinkageLocation Next(LocalType type) {
+  LinkageLocation Next(ValueType type) {
     if (IsFloatingPoint(type)) {
       // Allocate a floating point register/stack location.
       if (fp_offset < fp_count) {
@@ -182,7 +183,7 @@
         // Allocate floats using a double register, but modify the code to
         // reflect how ARM FP registers alias.
         // TODO(bbudge) Modify wasm linkage to allow use of all float regs.
-        if (type == kAstF32) {
+        if (type == kWasmF32) {
           int float_reg_code = reg.code() * 2;
           DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
           return regloc(DoubleRegister::from_code(float_reg_code),
@@ -206,11 +207,11 @@
       }
     }
   }
-  bool IsFloatingPoint(LocalType type) {
-    return type == kAstF32 || type == kAstF64;
+  bool IsFloatingPoint(ValueType type) {
+    return type == kWasmF32 || type == kWasmF64;
   }
-  int Words(LocalType type) {
-    if (kPointerSize < 8 && (type == kAstI64 || type == kAstF64)) {
+  int Words(ValueType type) {
+    if (kPointerSize < 8 && (type == kWasmI64 || type == kWasmF64)) {
       return 2;
     }
     return 1;
@@ -285,7 +286,7 @@
   // Add return location(s).
   const int return_count = static_cast<int>(locations.return_count_);
   for (int i = 0; i < return_count; i++) {
-    LocalType ret = fsig->GetReturn(i);
+    ValueType ret = fsig->GetReturn(i);
     locations.AddReturn(rets.Next(ret));
   }
 
@@ -294,7 +295,7 @@
   // Add register and/or stack parameter(s).
   const int parameter_count = static_cast<int>(fsig->parameter_count());
   for (int i = 0; i < parameter_count; i++) {
-    LocalType param = fsig->GetParam(i);
+    ValueType param = fsig->GetParam(i);
     locations.AddParam(params.Next(param));
   }
 
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index 745ac50..ae33e8c 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -11,6 +11,7 @@
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/osr.h"
+#include "src/heap/heap-inl.h"
 #include "src/wasm/wasm-module.h"
 #include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
@@ -43,9 +44,7 @@
       DCHECK_EQ(0, bit_cast<int64_t>(constant.ToFloat64()));
       return Immediate(0);
     }
-    if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-        constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE ||
-        constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+    if (RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(constant.ToInt32(), constant.rmode());
     }
     return Immediate(constant.ToInt32());
@@ -270,38 +269,56 @@
 
 class WasmOutOfLineTrap final : public OutOfLineCode {
  public:
-  WasmOutOfLineTrap(CodeGenerator* gen, Address pc, bool frame_elided,
-                    Register context, int32_t position)
+  WasmOutOfLineTrap(CodeGenerator* gen, int pc, bool frame_elided,
+                    int32_t position, Instruction* instr)
       : OutOfLineCode(gen),
+        gen_(gen),
         pc_(pc),
         frame_elided_(frame_elided),
-        context_(context),
-        position_(position) {}
+        position_(position),
+        instr_(instr) {}
 
+  // TODO(eholk): Refactor this method to take the code generator as a
+  // parameter.
   void Generate() final {
-    // TODO(eholk): record pc_ and the current pc in a table so that
-    // the signal handler can find it.
-    USE(pc_);
+    __ RecordProtectedInstructionLanding(pc_);
 
     if (frame_elided_) {
-      __ EnterFrame(StackFrame::WASM);
+      __ EnterFrame(StackFrame::WASM_COMPILED);
     }
 
     wasm::TrapReason trap_id = wasm::kTrapMemOutOfBounds;
     int trap_reason = wasm::WasmOpcodes::TrapReasonToMessageId(trap_id);
     __ Push(Smi::FromInt(trap_reason));
     __ Push(Smi::FromInt(position_));
-    __ Move(rsi, context_);
+    __ Move(rsi, gen_->isolate()->native_context());
     __ CallRuntime(Runtime::kThrowWasmError);
+
+    if (instr_->reference_map() != nullptr) {
+      gen_->RecordSafepoint(instr_->reference_map(), Safepoint::kSimple, 0,
+                            Safepoint::kNoLazyDeopt);
+    }
   }
 
  private:
-  Address pc_;
+  CodeGenerator* gen_;
+  int pc_;
   bool frame_elided_;
-  Register context_;
   int32_t position_;
+  Instruction* instr_;
 };
 
+void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
+                         InstructionCode opcode, size_t input_count,
+                         X64OperandConverter& i, int pc, Instruction* instr) {
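+  // Decode the protection bit from the instruction's MiscField; only
+  // accesses marked kProtected get an out-of-line trap landing pad.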
+  const X64MemoryProtection protection =
+      static_cast<X64MemoryProtection>(MiscField::decode(opcode));
+  if (protection == X64MemoryProtection::kProtected) {
+    const bool frame_elided = !codegen->frame_access_state()->has_frame();
+    const int32_t position = i.InputInt32(input_count - 1);
+    new (zone) WasmOutOfLineTrap(codegen, pc, frame_elided, position, instr);
+  }
+}
 }  // namespace
 
 
@@ -708,8 +725,8 @@
   Label done;
 
   // Check if current frame is an arguments adaptor frame.
-  __ Cmp(Operand(rbp, StandardFrameConstants::kContextOffset),
-         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ cmpp(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
+          Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   __ j(not_equal, &done, Label::kNear);
 
   // Load arguments count from current arguments adaptor frame (note, it
@@ -912,10 +929,8 @@
     case kArchDeoptimize: {
       int deopt_state_id =
           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -1838,21 +1853,31 @@
       __ Subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
       break;
     case kX64Movsxbl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxbl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxbl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxbl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movsxbq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxbq);
       break;
     case kX64Movzxbq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxbq);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movb: {
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
@@ -1863,21 +1888,31 @@
       break;
     }
     case kX64Movsxwl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxwl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxwl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxwl);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movsxwq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxwq);
       break;
     case kX64Movzxwq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movzxwq);
       __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movw: {
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       size_t index = 0;
       Operand operand = i.MemoryOperand(&index);
       if (HasImmediateInput(instr, index)) {
@@ -1888,7 +1923,8 @@
       break;
     }
     case kX64Movl:
-    case kX64TrapMovl:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         if (instr->addressing_mode() == kMode_None) {
           if (instr->InputAt(0)->IsRegister()) {
@@ -1897,14 +1933,7 @@
             __ movl(i.OutputRegister(), i.InputOperand(0));
           }
         } else {
-          Address pc = __ pc();
           __ movl(i.OutputRegister(), i.MemoryOperand());
-
-          if (arch_opcode == kX64TrapMovl) {
-            bool frame_elided = !frame_access_state()->has_frame();
-            new (zone()) WasmOutOfLineTrap(this, pc, frame_elided,
-                                           i.InputRegister(2), i.InputInt32(3));
-          }
         }
         __ AssertZeroExtended(i.OutputRegister());
       } else {
@@ -1918,9 +1947,13 @@
       }
       break;
     case kX64Movsxlq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       ASSEMBLE_MOVX(movsxlq);
       break;
     case kX64Movq:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ movq(i.OutputRegister(), i.MemoryOperand());
       } else {
@@ -1934,6 +1967,8 @@
       }
       break;
     case kX64Movss:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
@@ -1943,6 +1978,8 @@
       }
       break;
     case kX64Movsd:
+      EmitOOLTrapIfNeeded(zone(), this, opcode, instr->InputCount(), i,
+                          __ pc_offset(), instr);
       if (instr->HasOutput()) {
         __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
       } else {
@@ -2059,30 +2096,35 @@
       __ incl(i.OutputRegister());
       break;
     case kX64Push:
-      if (HasImmediateInput(instr, 0)) {
+      if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
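+        // The selector emitted a memory addressing mode, so push straight
+        // from memory instead of materializing the value in a register.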
+        size_t index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ pushq(operand);
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
+      } else if (HasImmediateInput(instr, 0)) {
         __ pushq(i.InputImmediate(0));
         frame_access_state()->IncreaseSPDelta(1);
         unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
                                                          kPointerSize);
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ pushq(i.InputRegister(0));
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
+      } else if (instr->InputAt(0)->IsFPRegister()) {
+        // TODO(titzer): use another machine instruction?
+        __ subq(rsp, Immediate(kDoubleSize));
+        frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kDoubleSize);
+        __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
       } else {
-        if (instr->InputAt(0)->IsRegister()) {
-          __ pushq(i.InputRegister(0));
-          frame_access_state()->IncreaseSPDelta(1);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kPointerSize);
-        } else if (instr->InputAt(0)->IsFPRegister()) {
-          // TODO(titzer): use another machine instruction?
-          __ subq(rsp, Immediate(kDoubleSize));
-          frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kDoubleSize);
-          __ Movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
-        } else {
-          __ pushq(i.InputOperand(0));
-          frame_access_state()->IncreaseSPDelta(1);
-          unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
-                                                           kPointerSize);
-        }
+        __ pushq(i.InputOperand(0));
+        frame_access_state()->IncreaseSPDelta(1);
+        unwinding_info_writer_.MaybeIncreaseBaseOffsetAt(__ pc_offset(),
+                                                         kPointerSize);
       }
       break;
     case kX64Poke: {
@@ -2124,6 +2166,26 @@
       __ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
       break;
     }
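+    // SSE4.1 lane ops: pinsrd rewrites a single 32-bit lane in place, while
+    // paddd/psubd use the output register as their implicit first operand.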
+    case kX64Int32x4ReplaceLane: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      if (instr->InputAt(2)->IsRegister()) {
+        __ Pinsrd(i.OutputSimd128Register(), i.InputRegister(2),
+                  i.InputInt8(1));
+      } else {
+        __ Pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
+      }
+      break;
+    }
+    case kX64Int32x4Add: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ paddd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
+    case kX64Int32x4Sub: {
+      CpuFeatureScope sse_scope(masm(), SSE4_1);
+      __ psubd(i.OutputSimd128Register(), i.InputSimd128Register(1));
+      break;
+    }
     case kCheckedLoadInt8:
       ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
       break;
@@ -2183,61 +2245,58 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
+namespace {
+
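+// Maps an architecture-independent FlagsCondition to an x64 condition code.
+// Unordered (NaN) cases map to their ordered counterparts here; callers
+// handle NaN explicitly with a parity check before dispatching.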
+Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return no_condition;
+}
+
+}  // namespace
 
 // Assembles branches after this instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  X64OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
   Label* tlabel = branch->true_label;
   Label* flabel = branch->false_label;
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
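+  // An unordered comparison (a NaN operand) sets the parity flag, so the
+  // unordered cases branch on parity first, then reuse the ordered test.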
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   if (!branch->fallthru) __ jmp(flabel, flabel_distance);
 }
 
@@ -2246,6 +2305,73 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
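+  // Emits a conditional jump to an out-of-line stub that enters a
+  // WASM_COMPILED frame if needed and calls the trap builtin (or a C
+  // callback when running under cctest).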
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      X64OperandConverter i(gen_, instr_);
+
+      Builtins::Name trap_id =
+          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        __ set_has_frame(old_has_frame);
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Builtins::Name trap_id) {
+      if (trap_id == Builtins::builtin_count) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+        __ LeaveFrame(StackFrame::WASM_COMPILED);
+        __ Ret();
+      } else {
+        gen_->AssembleSourcePosition(instr_);
+        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
+                RelocInfo::CODE_TARGET);
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        if (FLAG_debug_code) {
+          __ ud2();
+        }
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after this instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2258,60 +2384,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ movl(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ movl(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ movl(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ movl(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
   __ bind(&check);
-  __ setcc(cc, reg);
+  __ setcc(FlagsConditionToCondition(condition), reg);
   __ movzxbl(reg, reg);
   __ bind(&done);
 }
@@ -2344,13 +2427,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2555,8 +2641,7 @@
                                                : kScratchRegister;
       switch (src.type()) {
         case Constant::kInt32: {
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
             // TODO(dcarney): don't need scratch in this case.
@@ -2564,7 +2649,7 @@
             if (value == 0) {
               __ xorl(dst, dst);
             } else {
-              if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
+              if (RelocInfo::IsWasmSizeReference(src.rmode())) {
                 __ movl(dst, Immediate(value, src.rmode()));
               } else {
                 __ movl(dst, Immediate(value));
@@ -2574,11 +2659,10 @@
           break;
         }
         case Constant::kInt64:
-          if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-              src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
+          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
             __ movq(dst, src.ToInt64(), src.rmode());
           } else {
-            DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
+            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
             __ Set(dst, src.ToInt64());
           }
           break;
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
index 35acec0..aad1727 100644
--- a/src/compiler/x64/instruction-codes-x64.h
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -128,7 +128,6 @@
   V(X64Movzxwq)                    \
   V(X64Movw)                       \
   V(X64Movl)                       \
-  V(X64TrapMovl)                   \
   V(X64Movsxlq)                    \
   V(X64Movq)                       \
   V(X64Movsd)                      \
@@ -148,7 +147,10 @@
   V(X64Xchgw)                      \
   V(X64Xchgl)                      \
   V(X64Int32x4Create)              \
-  V(X64Int32x4ExtractLane)
+  V(X64Int32x4ExtractLane)         \
+  V(X64Int32x4ReplaceLane)         \
+  V(X64Int32x4Add)                 \
+  V(X64Int32x4Sub)
 
 // Addressing modes represent the "shape" of inputs to an instruction.
 // Many instructions support multiple addressing modes. Addressing modes
@@ -183,6 +185,8 @@
   V(M8I)  /* [      %r2*8 + K] */      \
   V(Root) /* [%root       + K] */
 
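+// Tags a memory-access instruction as needing trap-handler protection; the
+// instruction selector encodes this in MiscField and the code generator
+// checks it when deciding whether to emit an out-of-line trap.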
+enum X64MemoryProtection { kUnprotected = 0, kProtected = 1 };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/x64/instruction-scheduler-x64.cc b/src/compiler/x64/instruction-scheduler-x64.cc
index ef0c3ad..427e580 100644
--- a/src/compiler/x64/instruction-scheduler-x64.cc
+++ b/src/compiler/x64/instruction-scheduler-x64.cc
@@ -125,6 +125,9 @@
     case kX64Inc32:
     case kX64Int32x4Create:
     case kX64Int32x4ExtractLane:
+    case kX64Int32x4ReplaceLane:
+    case kX64Int32x4Add:
+    case kX64Int32x4Sub:
       return (instr->addressing_mode() == kMode_None)
           ? kNoOpcodeFlags
           : kIsLoadOperation | kHasSideEffect;
@@ -155,7 +158,6 @@
       return kHasSideEffect;
 
     case kX64Movl:
-    case kX64TrapMovl:
       if (instr->HasOutput()) {
         DCHECK(instr->InputCount() >= 1);
         return instr->InputAt(0)->IsRegister() ? kNoOpcodeFlags
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index 878e778..7abdd90 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -58,6 +58,7 @@
     MachineRepresentation rep =
         LoadRepresentationOf(input->op()).representation();
     switch (opcode) {
+      case kX64Push:
       case kX64Cmp:
       case kX64Test:
         return rep == MachineRepresentation::kWord64 || IsAnyTagged(rep);
@@ -82,6 +83,15 @@
                                              InstructionOperand inputs[],
                                              size_t* input_count) {
     AddressingMode mode = kMode_MRI;
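+    // A base that is a constant zero contributes nothing to the address, so
+    // drop it and let the index/displacement pick a cheaper addressing mode.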
+    if (base != nullptr && (index != nullptr || displacement != nullptr)) {
+      if (base->opcode() == IrOpcode::kInt32Constant &&
+          OpParameter<int32_t>(base) == 0) {
+        base = nullptr;
+      } else if (base->opcode() == IrOpcode::kInt64Constant &&
+                 OpParameter<int64_t>(base) == 0) {
+        base = nullptr;
+      }
+    }
     if (base != nullptr) {
       inputs[(*input_count)++] = UseRegister(base);
       if (index != nullptr) {
@@ -110,17 +120,22 @@
         }
       }
     } else {
-      DCHECK_NOT_NULL(index);
       DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
-      inputs[(*input_count)++] = UseRegister(index);
       if (displacement != nullptr) {
-        inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
-                                       ? UseNegatedImmediate(displacement)
-                                       : UseImmediate(displacement);
-        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
-                                                    kMode_M4I, kMode_M8I};
-        mode = kMnI_modes[scale_exponent];
+        if (index == nullptr) {
+          inputs[(*input_count)++] = UseRegister(displacement);
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = UseRegister(index);
+          inputs[(*input_count)++] = displacement_mode == kNegativeDisplacement
+                                         ? UseNegatedImmediate(displacement)
+                                         : UseImmediate(displacement);
+          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                      kMode_M4I, kMode_M8I};
+          mode = kMnI_modes[scale_exponent];
+        }
       } else {
+        inputs[(*input_count)++] = UseRegister(index);
         static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
                                                    kMode_M4, kMode_M8};
         mode = kMn_modes[scale_exponent];
@@ -154,10 +169,18 @@
     }
     BaseWithIndexAndDisplacement64Matcher m(operand, AddressOption::kAllowAll);
     DCHECK(m.matches());
-    if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
+    if (m.displacement() == nullptr || CanBeImmediate(m.displacement())) {
       return GenerateMemoryOperandInputs(
           m.index(), m.scale(), m.base(), m.displacement(),
           m.displacement_mode(), inputs, input_count);
+    } else if (m.base() == nullptr &&
+               m.displacement_mode() == kPositiveDisplacement) {
+      // The displacement cannot be an immediate, but we can use the
+      // displacement as base instead and still benefit from addressing
+      // modes for the scale.
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.displacement(),
+                                         nullptr, m.displacement_mode(), inputs,
+                                         input_count);
     } else {
       inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
       inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
@@ -171,7 +194,6 @@
 };
 
 namespace {
-
 ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
   ArchOpcode opcode = kArchNop;
   switch (load_rep.representation()) {
@@ -198,6 +220,9 @@
       opcode = kX64Movq;
       break;
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       break;
@@ -205,6 +230,42 @@
   return opcode;
 }
 
+ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
+  switch (store_rep.representation()) {
+    case MachineRepresentation::kFloat32:
+      return kX64Movss;
+    case MachineRepresentation::kFloat64:
+      return kX64Movsd;
+    case MachineRepresentation::kBit:  // Fall through.
+    case MachineRepresentation::kWord8:
+      return kX64Movb;
+    case MachineRepresentation::kWord16:
+      return kX64Movw;
+    case MachineRepresentation::kWord32:
+      return kX64Movl;
+    case MachineRepresentation::kTaggedSigned:   // Fall through.
+    case MachineRepresentation::kTaggedPointer:  // Fall through.
+    case MachineRepresentation::kTagged:         // Fall through.
+    case MachineRepresentation::kWord64:
+      return kX64Movq;
+    case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
+    case MachineRepresentation::kNone:
+      UNREACHABLE();
+      return kArchNop;
+  }
+  UNREACHABLE();
+  return kArchNop;
+}
+
 }  // namespace
 
 void InstructionSelector::VisitLoad(Node* node) {
@@ -214,33 +275,21 @@
   ArchOpcode opcode = GetLoadOpcode(load_rep);
   InstructionOperand outputs[1];
   outputs[0] = g.DefineAsRegister(node);
-  InstructionOperand inputs[3];
-  size_t input_count = 0;
-  AddressingMode mode =
-      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
-  InstructionCode code = opcode | AddressingModeField::encode(mode);
-  Emit(code, 1, outputs, input_count, inputs);
-}
-
-void InstructionSelector::VisitProtectedLoad(Node* node) {
-  LoadRepresentation load_rep = LoadRepresentationOf(node->op());
-  X64OperandGenerator g(this);
-
-  ArchOpcode opcode = GetLoadOpcode(load_rep);
-  InstructionOperand outputs[1];
-  outputs[0] = g.DefineAsRegister(node);
   InstructionOperand inputs[4];
   size_t input_count = 0;
   AddressingMode mode =
       g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
-  // Add the context parameter as an input.
-  inputs[input_count++] = g.UseUniqueRegister(node->InputAt(2));
-  // Add the source position as an input
-  inputs[input_count++] = g.UseImmediate(node->InputAt(3));
   InstructionCode code = opcode | AddressingModeField::encode(mode);
+  if (node->opcode() == IrOpcode::kProtectedLoad) {
+    code |= MiscField::encode(X64MemoryProtection::kProtected);
+    // Add the source position as an input.
+    inputs[input_count++] = g.UseImmediate(node->InputAt(2));
+  }
   Emit(code, 1, outputs, input_count, inputs);
 }
 
+void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
+
 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
@@ -249,10 +298,9 @@
 
   StoreRepresentation store_rep = StoreRepresentationOf(node->op());
   WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
-  MachineRepresentation rep = store_rep.representation();
 
   if (write_barrier_kind != kNoWriteBarrier) {
-    DCHECK(CanBeTaggedPointer(rep));
+    DCHECK(CanBeTaggedPointer(store_rep.representation()));
     AddressingMode addressing_mode;
     InstructionOperand inputs[3];
     size_t input_count = 0;
@@ -287,41 +335,18 @@
     code |= MiscField::encode(static_cast<int>(record_write_mode));
     Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
   } else {
-    ArchOpcode opcode = kArchNop;
-    switch (rep) {
-      case MachineRepresentation::kFloat32:
-        opcode = kX64Movss;
-        break;
-      case MachineRepresentation::kFloat64:
-        opcode = kX64Movsd;
-        break;
-      case MachineRepresentation::kBit:  // Fall through.
-      case MachineRepresentation::kWord8:
-        opcode = kX64Movb;
-        break;
-      case MachineRepresentation::kWord16:
-        opcode = kX64Movw;
-        break;
-      case MachineRepresentation::kWord32:
-        opcode = kX64Movl;
-        break;
-      case MachineRepresentation::kTaggedSigned:   // Fall through.
-      case MachineRepresentation::kTaggedPointer:  // Fall through.
-      case MachineRepresentation::kTagged:  // Fall through.
-      case MachineRepresentation::kWord64:
-        opcode = kX64Movq;
-        break;
-      case MachineRepresentation::kSimd128:  // Fall through.
-      case MachineRepresentation::kNone:
-        UNREACHABLE();
-        return;
-    }
+    ArchOpcode opcode = GetStoreOpcode(store_rep);
     InstructionOperand inputs[4];
     size_t input_count = 0;
     AddressingMode addressing_mode =
         g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
     InstructionCode code =
         opcode | AddressingModeField::encode(addressing_mode);
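+    // A store narrower than the pointer size only reads the low bits of the
+    // value, so an Int64->Int32 truncation feeding it is redundant; store
+    // the untruncated input directly.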
+    if ((ElementSizeLog2Of(store_rep.representation()) < kPointerSizeLog2) &&
+        (value->opcode() == IrOpcode::kTruncateInt64ToInt32) &&
+        CanCover(node, value)) {
+      value = value->InputAt(0);
+    }
     InstructionOperand value_operand =
         g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
     inputs[input_count++] = value_operand;
@@ -330,6 +355,27 @@
   }
 }
 
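+// Like VisitStore, but tags the instruction as protected and appends the
+// wasm source position so an out-of-line trap can report it precisely.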
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* value = node->InputAt(2);
+  Node* position = node->InputAt(3);
+
+  StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+
+  ArchOpcode opcode = GetStoreOpcode(store_rep);
+  InstructionOperand inputs[5];
+  size_t input_count = 0;
+  AddressingMode addressing_mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+                         MiscField::encode(X64MemoryProtection::kProtected);
+  InstructionOperand value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  inputs[input_count++] = value_operand;
+  inputs[input_count++] = g.UseImmediate(position);
+  Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count, inputs);
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -364,6 +410,9 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kTaggedSigned:   // Fall through.
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
@@ -419,6 +468,9 @@
       break;
     case MachineRepresentation::kBit:      // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kTaggedSigned:   // Fall through.
     case MachineRepresentation::kTaggedPointer:  // Fall through.
     case MachineRepresentation::kTagged:   // Fall through.
@@ -502,7 +554,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -796,31 +848,6 @@
   VisitWord64Shift(this, node, kX64Ror);
 }
 
-
-void InstructionSelector::VisitWord64Clz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Lzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Clz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Lzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Ctz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Tzcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord32Ctz(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Tzcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
 
 
@@ -830,18 +857,6 @@
 
 void InstructionSelector::VisitWord32ReverseBytes(Node* node) { UNREACHABLE(); }
 
-void InstructionSelector::VisitWord32Popcnt(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Popcnt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64Popcnt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
   X64OperandGenerator g(this);
 
@@ -1064,55 +1079,6 @@
   VisitMulHigh(this, node, kX64UmulHigh32);
 }
 
-
-void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32 | MiscField::encode(1), g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ToUint32 | MiscField::encode(0), g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat32ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
@@ -1330,16 +1296,65 @@
 
 }  // namespace
 
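+// Pure single-instruction conversions and bit ops are listed once and
+// expanded into VisitRO/VisitRR visitor definitions below, replacing the
+// hand-written one-liners removed further down.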
+#define RO_OP_LIST(V)                                                    \
+  V(Word64Clz, kX64Lzcnt)                                                \
+  V(Word32Clz, kX64Lzcnt32)                                              \
+  V(Word64Ctz, kX64Tzcnt)                                                \
+  V(Word32Ctz, kX64Tzcnt32)                                              \
+  V(Word64Popcnt, kX64Popcnt)                                            \
+  V(Word32Popcnt, kX64Popcnt32)                                          \
+  V(Float64Sqrt, kSSEFloat64Sqrt)                                        \
+  V(Float32Sqrt, kSSEFloat32Sqrt)                                        \
+  V(ChangeFloat64ToInt32, kSSEFloat64ToInt32)                            \
+  V(ChangeFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(1))   \
+  V(TruncateFloat64ToUint32, kSSEFloat64ToUint32 | MiscField::encode(0)) \
+  V(TruncateFloat64ToFloat32, kSSEFloat64ToFloat32)                      \
+  V(ChangeFloat32ToFloat64, kSSEFloat32ToFloat64)                        \
+  V(TruncateFloat32ToInt32, kSSEFloat32ToInt32)                          \
+  V(TruncateFloat32ToUint32, kSSEFloat32ToUint32)                        \
+  V(ChangeInt32ToFloat64, kSSEInt32ToFloat64)                            \
+  V(ChangeUint32ToFloat64, kSSEUint32ToFloat64)                          \
+  V(RoundFloat64ToInt32, kSSEFloat64ToInt32)                             \
+  V(RoundInt32ToFloat32, kSSEInt32ToFloat32)                             \
+  V(RoundInt64ToFloat32, kSSEInt64ToFloat32)                             \
+  V(RoundInt64ToFloat64, kSSEInt64ToFloat64)                             \
+  V(RoundUint32ToFloat32, kSSEUint32ToFloat32)                           \
+  V(BitcastFloat32ToInt32, kX64BitcastFI)                                \
+  V(BitcastFloat64ToInt64, kX64BitcastDL)                                \
+  V(BitcastInt32ToFloat32, kX64BitcastIF)                                \
+  V(BitcastInt64ToFloat64, kX64BitcastLD)                                \
+  V(Float64ExtractLowWord32, kSSEFloat64ExtractLowWord32)                \
+  V(Float64ExtractHighWord32, kSSEFloat64ExtractHighWord32)
 
-void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToFloat32);
-}
+#define RR_OP_LIST(V)                                                         \
+  V(Float32RoundDown, kSSEFloat32Round | MiscField::encode(kRoundDown))       \
+  V(Float64RoundDown, kSSEFloat64Round | MiscField::encode(kRoundDown))       \
+  V(Float32RoundUp, kSSEFloat32Round | MiscField::encode(kRoundUp))           \
+  V(Float64RoundUp, kSSEFloat64Round | MiscField::encode(kRoundUp))           \
+  V(Float32RoundTruncate, kSSEFloat32Round | MiscField::encode(kRoundToZero)) \
+  V(Float64RoundTruncate, kSSEFloat64Round | MiscField::encode(kRoundToZero)) \
+  V(Float32RoundTiesEven,                                                     \
+    kSSEFloat32Round | MiscField::encode(kRoundToNearest))                    \
+  V(Float64RoundTiesEven, kSSEFloat64Round | MiscField::encode(kRoundToNearest))
+
+#define RO_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRO(this, node, opcode);                      \
+  }
+RO_OP_LIST(RO_VISITOR)
+#undef RO_VISITOR
+
+#define RR_VISITOR(Name, opcode)                      \
+  void InstructionSelector::Visit##Name(Node* node) { \
+    VisitRR(this, node, opcode);                      \
+  }
+RR_OP_LIST(RR_VISITOR)
+#undef RR_VISITOR
 
 void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
   VisitRR(this, node, kArchTruncateDoubleToI);
 }
 
-
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
   Node* value = node->InputAt(0);
@@ -1365,34 +1380,6 @@
   Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
 
-void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
-  VisitRO(this, node, kSSEFloat64ToInt32);
-}
-
-void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt64ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEInt64ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEUint32ToFloat32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand temps[] = {g.TempRegister()};
@@ -1408,31 +1395,6 @@
        arraysize(temps), temps);
 }
 
-
-void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastFI, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastDL, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastIF, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kX64BitcastLD, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat32Add(Node* node) {
   VisitFloatBinop(this, node, kAVXFloat32Add, kSSEFloat32Add);
 }
@@ -1457,10 +1419,6 @@
 }
 
 
-void InstructionSelector::VisitFloat32Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat32Sqrt);
-}
-
 void InstructionSelector::VisitFloat32Max(Node* node) {
   VisitRRO(this, node, kSSEFloat32Max);
 }
@@ -1511,55 +1469,12 @@
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
 }
 
-void InstructionSelector::VisitFloat64Sqrt(Node* node) {
-  VisitRO(this, node, kSSEFloat64Sqrt);
-}
-
-
-void InstructionSelector::VisitFloat32RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat64RoundDown(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundDown));
-}
-
-
-void InstructionSelector::VisitFloat32RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat64RoundUp(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundUp));
-}
-
-
-void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToZero));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToZero));
-}
-
 
 void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
   UNREACHABLE();
 }
 
 
-void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat32Round | MiscField::encode(kRoundToNearest));
-}
-
-
-void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
-  VisitRR(this, node, kSSEFloat64Round | MiscField::encode(kRoundToNearest));
-}
-
 void InstructionSelector::VisitFloat32Neg(Node* node) {
   VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
 }
@@ -1607,17 +1522,29 @@
     }
   } else {
     // Push any stack arguments.
+    int effect_level = GetEffectLevel(node);
     for (PushParameter input : base::Reversed(*arguments)) {
-      // TODO(titzer): X64Push cannot handle stack->stack double moves
-      // because there is no way to encode fixed double slots.
-      InstructionOperand value =
-          g.CanBeImmediate(input.node())
-              ? g.UseImmediate(input.node())
-              : IsSupported(ATOM) ||
-                        sequence()->IsFP(GetVirtualRegister(input.node()))
-                    ? g.UseRegister(input.node())
-                    : g.Use(input.node());
-      Emit(kX64Push, g.NoOutput(), value);
+      Node* input_node = input.node();
+      if (g.CanBeImmediate(input_node)) {
+        Emit(kX64Push, g.NoOutput(), g.UseImmediate(input_node));
+      } else if (IsSupported(ATOM) ||
+                 sequence()->IsFP(GetVirtualRegister(input_node))) {
+        // TODO(titzer): X64Push cannot handle stack->stack double moves
+        // because there is no way to encode fixed double slots.
+        Emit(kX64Push, g.NoOutput(), g.UseRegister(input_node));
+      } else if (g.CanBeMemoryOperand(kX64Push, node, input_node,
+                                      effect_level)) {
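+        // pushq accepts a memory operand directly, avoiding a scratch
+        // register for stack->stack moves of pointer-sized values.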
+        InstructionOperand outputs[1];
+        InstructionOperand inputs[4];
+        size_t input_count = 0;
+        InstructionCode opcode = kX64Push;
+        AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
+            input_node, inputs, &input_count);
+        opcode |= AddressingModeField::encode(mode);
+        Emit(opcode, 0, outputs, input_count, inputs);
+      } else {
+        Emit(kX64Push, g.NoOutput(), g.Use(input_node));
+      }
     }
   }
 }
@@ -1649,11 +1576,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1667,11 +1597,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1687,21 +1620,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
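+// Determines how narrow a machine type can safely be assumed for a
+// comparison operand: loads report their load representation, and constants
+// adopt the other operand's load type when they fit in its value range.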
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1775,11 +1741,6 @@
                                          g.UseRegister(right), cont);
   }
 
-  if (g.CanBeBetterLeftOperand(right)) {
-    if (!node->op()->HasProperty(Operator::kCommutative)) cont->Commute();
-    std::swap(left, right);
-  }
-
   return VisitCompare(selector, opcode, left, right, cont,
                       node->op()->HasProperty(Operator::kCommutative));
 }
@@ -1824,11 +1785,13 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
-      } else {
-        DCHECK(cont->IsSet());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
+      } else if (cont->IsSet()) {
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
+      } else {
+        DCHECK(cont->IsTrap());
+        selector->Emit(opcode, g.NoOutput(), g.UseImmediate(cont->trap_id()));
       }
       return;
     }
@@ -2001,12 +1964,8 @@
         break;
       case IrOpcode::kInt32Sub:
         return VisitWordCompare(selector, value, kX64Cmp32, cont);
-      case IrOpcode::kInt64Sub:
-        return VisitWord64Compare(selector, value, cont);
       case IrOpcode::kWord32And:
         return VisitWordCompare(selector, value, kX64Test32, cont);
-      case IrOpcode::kWord64And:
-        return VisitWordCompare(selector, value, kX64Test, cont);
       default:
         break;
     }
@@ -2025,14 +1984,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
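+// Wasm trap continuations: instead of deoptimizing, control branches to a
+// call of the runtime trap handler identified by func_id.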
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
@@ -2072,32 +2046,7 @@
   FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
   Int32BinopMatcher m(user);
   if (m.right().Is(0)) {
-    Node* value = m.left().node();
-
-    // Try to combine with comparisons against 0 by simply inverting the branch.
-    while (CanCover(user, value) && value->opcode() == IrOpcode::kWord32Equal) {
-      Int32BinopMatcher m(value);
-      if (m.right().Is(0)) {
-        user = value;
-        value = m.left().node();
-        cont.Negate();
-      } else {
-        break;
-      }
-    }
-
-    // Try to combine the branch with a comparison.
-    if (CanCover(user, value)) {
-      switch (value->opcode()) {
-        case IrOpcode::kInt32Sub:
-          return VisitWordCompare(this, value, kX64Cmp32, &cont);
-        case IrOpcode::kWord32And:
-          return VisitWordCompare(this, value, kX64Test32, &cont);
-        default:
-          break;
-      }
-    }
-    return VisitCompareZero(this, value, kX64Cmp32, &cont);
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
   }
   VisitWordCompare(this, node, kX64Cmp32, &cont);
 }
@@ -2250,21 +2199,6 @@
   VisitFloat64Compare(this, node, &cont);
 }
 
-
-void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractLowWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
-void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
-  X64OperandGenerator g(this);
-  Emit(kSSEFloat64ExtractHighWord32, g.DefineAsRegister(node),
-       g.Use(node->InputAt(0)));
-}
-
-
 void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
   X64OperandGenerator g(this);
   Node* left = node->InputAt(0);
@@ -2347,8 +2281,29 @@
 
 void InstructionSelector::VisitInt32x4ExtractLane(Node* node) {
   X64OperandGenerator g(this);
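+  // The lane index is a static parameter of the operator, not a value input.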
+  int32_t lane = OpParameter<int32_t>(node);
   Emit(kX64Int32x4ExtractLane, g.DefineAsRegister(node),
-       g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
+       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane));
+}
+
+void InstructionSelector::VisitInt32x4ReplaceLane(Node* node) {
+  X64OperandGenerator g(this);
+  int32_t lane = OpParameter<int32_t>(node);
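+  // The lane insert modifies its destination in place, so the output is
+  // constrained to the same register as the first input.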
+  Emit(kX64Int32x4ReplaceLane, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseImmediate(lane),
+       g.Use(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Add(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitInt32x4Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Int32x4Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
 // static
diff --git a/src/compiler/x87/code-generator-x87.cc b/src/compiler/x87/code-generator-x87.cc
index d2f64e8..fc5992a 100644
--- a/src/compiler/x87/code-generator-x87.cc
+++ b/src/compiler/x87/code-generator-x87.cc
@@ -60,9 +60,7 @@
   Immediate ToImmediate(InstructionOperand* operand) {
     Constant constant = ToConstant(operand);
     if (constant.type() == Constant::kInt32 &&
-        (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
-         constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE)) {
+        RelocInfo::IsWasmReference(constant.rmode())) {
       return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
                        constant.rmode());
     }
@@ -738,10 +736,8 @@
       __ fild_s(MemOperand(esp, 0));
       __ lea(esp, Operand(esp, kPointerSize));
 
-      Deoptimizer::BailoutType bailout_type =
-          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
-      CodeGenResult result = AssembleDeoptimizerCall(
-          deopt_state_id, bailout_type, current_source_position_);
+      CodeGenResult result =
+          AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
       if (result != kSuccess) return result;
       break;
     }
@@ -996,10 +992,10 @@
       } else {
         __ add(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ adc(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
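+      // The high-word input was first moved into the output register, so the
+      // add-with-carry can write its result directly to the output.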
+      __ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -1021,10 +1017,10 @@
       } else {
         __ sub(i.OutputRegister(0), i.InputRegister(2));
       }
-      __ sbb(i.InputRegister(1), Operand(i.InputRegister(3)));
       if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
         __ Move(i.OutputRegister(1), i.InputRegister(1));
       }
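+      // The high-word input was first moved into the output register, so the
+      // subtract-with-borrow can write its result directly to the output.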
+      __ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
       if (use_temp) {
         __ Move(i.OutputRegister(0), i.TempRegister(0));
       }
@@ -2030,10 +2026,55 @@
   return kSuccess;
 }  // NOLINT(readability/fn_size)
 
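+// Maps a FlagsCondition to the assembler's Condition. Unordered conditions
+// map to their ordered counterparts; callers emit the parity check for the
+// unordered (NaN) case separately.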
+static Condition FlagsConditionToCondition(FlagsCondition condition) {
+  switch (condition) {
+    case kUnorderedEqual:
+    case kEqual:
+      return equal;
+    case kUnorderedNotEqual:
+    case kNotEqual:
+      return not_equal;
+    case kSignedLessThan:
+      return less;
+    case kSignedGreaterThanOrEqual:
+      return greater_equal;
+    case kSignedLessThanOrEqual:
+      return less_equal;
+    case kSignedGreaterThan:
+      return greater;
+    case kUnsignedLessThan:
+      return below;
+    case kUnsignedGreaterThanOrEqual:
+      return above_equal;
+    case kUnsignedLessThanOrEqual:
+      return below_equal;
+    case kUnsignedGreaterThan:
+      return above;
+    case kOverflow:
+      return overflow;
+    case kNotOverflow:
+      return no_overflow;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
 
 // Assembles a branch after an instruction.
 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
-  X87OperandConverter i(this, instr);
   Label::Distance flabel_distance =
       branch->fallthru ? Label::kNear : Label::kFar;
 
@@ -2046,53 +2087,13 @@
   Label* tlabel_dst = branch->true_label;
   Label* flabel_dst = branch->false_label;
 
-  switch (branch->condition) {
-    case kUnorderedEqual:
-      __ j(parity_even, flabel, flabel_distance);
-    // Fall through.
-    case kEqual:
-      __ j(equal, tlabel);
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_even, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ j(not_equal, tlabel);
-      break;
-    case kSignedLessThan:
-      __ j(less, tlabel);
-      break;
-    case kSignedGreaterThanOrEqual:
-      __ j(greater_equal, tlabel);
-      break;
-    case kSignedLessThanOrEqual:
-      __ j(less_equal, tlabel);
-      break;
-    case kSignedGreaterThan:
-      __ j(greater, tlabel);
-      break;
-    case kUnsignedLessThan:
-      __ j(below, tlabel);
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      __ j(above_equal, tlabel);
-      break;
-    case kUnsignedLessThanOrEqual:
-      __ j(below_equal, tlabel);
-      break;
-    case kUnsignedGreaterThan:
-      __ j(above, tlabel);
-      break;
-    case kOverflow:
-      __ j(overflow, tlabel);
-      break;
-    case kNotOverflow:
-      __ j(no_overflow, tlabel);
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (branch->condition == kUnorderedEqual) {
+    __ j(parity_even, flabel, flabel_distance);
+  } else if (branch->condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
   }
+  __ j(FlagsConditionToCondition(branch->condition), tlabel);
+
   // Add a jump if not falling through to the next block.
   if (!branch->fallthru) __ jmp(flabel);
 
@@ -2130,6 +2131,71 @@
   if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
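+// Emits the check for a trap continuation: on a taken condition, control
+// branches to an out-of-line sequence that sets up a WASM frame if none
+// exists and calls the trap runtime function given by the last input.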
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+                                     FlagsCondition condition) {
+  class OutOfLineTrap final : public OutOfLineCode {
+   public:
+    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
+        : OutOfLineCode(gen),
+          frame_elided_(frame_elided),
+          instr_(instr),
+          gen_(gen) {}
+
+    void Generate() final {
+      X87OperandConverter i(gen_, instr_);
+
+      Runtime::FunctionId trap_id = static_cast<Runtime::FunctionId>(
+          i.InputInt32(instr_->InputCount() - 1));
+      bool old_has_frame = __ has_frame();
+      if (frame_elided_) {
+        __ set_has_frame(true);
+        __ EnterFrame(StackFrame::WASM_COMPILED);
+      }
+      GenerateCallToTrap(trap_id);
+      if (frame_elided_) {
+        ReferenceMap* reference_map =
+            new (gen_->zone()) ReferenceMap(gen_->zone());
+        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
+                              Safepoint::kNoLazyDeopt);
+        __ set_has_frame(old_has_frame);
+      }
+      if (FLAG_debug_code) {
+        __ ud2();
+      }
+    }
+
+   private:
+    void GenerateCallToTrap(Runtime::FunctionId trap_id) {
+      if (trap_id == Runtime::kNumFunctions) {
+        // We cannot test calls to the runtime in cctest/test-run-wasm.
+        // Therefore we emit a call to C here instead of a call to the runtime.
+        __ PrepareCallCFunction(0, esi);
+        __ CallCFunction(
+            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
+            0);
+      } else {
+        __ Move(esi, isolate()->native_context());
+        gen_->AssembleSourcePosition(instr_);
+        __ CallRuntime(trap_id);
+      }
+    }
+
+    bool frame_elided_;
+    Instruction* instr_;
+    CodeGenerator* gen_;
+  };
+  bool frame_elided = !frame_access_state()->has_frame();
+  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
+  Label* tlabel = ool->entry();
+  Label end;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_even, &end);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_even, tlabel);
+  }
+  __ j(FlagsConditionToCondition(condition), tlabel);
+  __ bind(&end);
+}
 
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
@@ -2142,58 +2208,17 @@
   Label check;
   DCHECK_NE(0u, instr->OutputCount());
   Register reg = i.OutputRegister(instr->OutputCount() - 1);
-  Condition cc = no_condition;
-  switch (condition) {
-    case kUnorderedEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ Move(reg, Immediate(0));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kEqual:
-      cc = equal;
-      break;
-    case kUnorderedNotEqual:
-      __ j(parity_odd, &check, Label::kNear);
-      __ mov(reg, Immediate(1));
-      __ jmp(&done, Label::kNear);
-    // Fall through.
-    case kNotEqual:
-      cc = not_equal;
-      break;
-    case kSignedLessThan:
-      cc = less;
-      break;
-    case kSignedGreaterThanOrEqual:
-      cc = greater_equal;
-      break;
-    case kSignedLessThanOrEqual:
-      cc = less_equal;
-      break;
-    case kSignedGreaterThan:
-      cc = greater;
-      break;
-    case kUnsignedLessThan:
-      cc = below;
-      break;
-    case kUnsignedGreaterThanOrEqual:
-      cc = above_equal;
-      break;
-    case kUnsignedLessThanOrEqual:
-      cc = below_equal;
-      break;
-    case kUnsignedGreaterThan:
-      cc = above;
-      break;
-    case kOverflow:
-      cc = overflow;
-      break;
-    case kNotOverflow:
-      cc = no_overflow;
-      break;
-    default:
-      UNREACHABLE();
-      break;
+  if (condition == kUnorderedEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ Move(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+  } else if (condition == kUnorderedNotEqual) {
+    __ j(parity_odd, &check, Label::kNear);
+    __ mov(reg, Immediate(1));
+    __ jmp(&done, Label::kNear);
   }
+  Condition cc = FlagsConditionToCondition(condition);
+
   __ bind(&check);
   if (reg.is_byte_register()) {
     // setcc for byte registers (al, bl, cl, dl).
@@ -2238,13 +2263,16 @@
 }
 
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
-    int deoptimization_id, Deoptimizer::BailoutType bailout_type,
-    SourcePosition pos) {
+    int deoptimization_id, SourcePosition pos) {
+  DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
+  DeoptimizeReason deoptimization_reason =
+      GetDeoptimizationReason(deoptimization_id);
+  Deoptimizer::BailoutType bailout_type =
+      deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
+                                                   : Deoptimizer::EAGER;
   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
       isolate(), deoptimization_id, bailout_type);
   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
-  DeoptimizeReason deoptimization_reason =
-      GetDeoptimizationReason(deoptimization_id);
   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   return kSuccess;
@@ -2560,7 +2588,7 @@
       __ Move(dst, g.ToImmediate(source));
     } else if (src_constant.type() == Constant::kFloat32) {
       // TODO(turbofan): Can we do better here?
-      uint32_t src = bit_cast<uint32_t>(src_constant.ToFloat32());
+      uint32_t src = src_constant.ToFloat32AsInt();
       if (destination->IsFPRegister()) {
         __ sub(esp, Immediate(kInt32Size));
         __ mov(MemOperand(esp, 0), Immediate(src));
@@ -2575,7 +2603,7 @@
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src_constant.type());
-      uint64_t src = bit_cast<uint64_t>(src_constant.ToFloat64());
+      uint64_t src = src_constant.ToFloat64AsInt();
       uint32_t lower = static_cast<uint32_t>(src);
       uint32_t upper = static_cast<uint32_t>(src >> 32);
       if (destination->IsFPRegister()) {
diff --git a/src/compiler/x87/instruction-selector-x87.cc b/src/compiler/x87/instruction-selector-x87.cc
index a737d1e..ede0d45 100644
--- a/src/compiler/x87/instruction-selector-x87.cc
+++ b/src/compiler/x87/instruction-selector-x87.cc
@@ -195,6 +195,9 @@
       break;
     case MachineRepresentation::kWord64:   // Fall through.
     case MachineRepresentation::kSimd128:  // Fall through.
+    case MachineRepresentation::kSimd1x4:  // Fall through.
+    case MachineRepresentation::kSimd1x8:  // Fall through.
+    case MachineRepresentation::kSimd1x16:  // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -285,6 +288,9 @@
         break;
       case MachineRepresentation::kWord64:   // Fall through.
       case MachineRepresentation::kSimd128:  // Fall through.
+      case MachineRepresentation::kSimd1x4:  // Fall through.
+      case MachineRepresentation::kSimd1x8:  // Fall through.
+      case MachineRepresentation::kSimd1x16:  // Fall through.
       case MachineRepresentation::kNone:
         UNREACHABLE();
         return;
@@ -312,6 +318,11 @@
   }
 }
 
+void InstructionSelector::VisitProtectedStore(Node* node) {
+  // TODO(eholk): Implement protected stores on x87.
+  UNIMPLEMENTED();
+}
+
 // Architecture supports unaligned access, therefore VisitLoad is used instead
 void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
 
@@ -347,6 +358,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -396,6 +410,9 @@
     case MachineRepresentation::kTagged:         // Fall through.
     case MachineRepresentation::kWord64:         // Fall through.
     case MachineRepresentation::kSimd128:        // Fall through.
+    case MachineRepresentation::kSimd1x4:        // Fall through.
+    case MachineRepresentation::kSimd1x8:        // Fall through.
+    case MachineRepresentation::kSimd1x16:       // Fall through.
     case MachineRepresentation::kNone:
       UNREACHABLE();
       return;
@@ -476,7 +493,7 @@
   opcode = cont->Encode(opcode);
   if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, output_count, outputs, input_count, inputs,
-                             cont->reason(), cont->frame_state());
+                             cont->kind(), cont->reason(), cont->frame_state());
   } else {
     selector->Emit(opcode, output_count, outputs, input_count, inputs);
   }
@@ -1218,11 +1235,14 @@
     selector->Emit(opcode, 0, nullptr, input_count, inputs);
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(opcode, 0, nullptr, input_count, inputs,
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     InstructionOperand output = g.DefineAsRegister(cont->result());
     selector->Emit(opcode, 1, &output, input_count, inputs);
+  } else {
+    DCHECK(cont->IsTrap());
+    inputs[input_count++] = g.UseImmediate(cont->trap_id());
+    selector->Emit(opcode, 0, nullptr, input_count, inputs);
   }
 }
 
@@ -1236,11 +1256,14 @@
     selector->Emit(opcode, g.NoOutput(), left, right,
                    g.Label(cont->true_block()), g.Label(cont->false_block()));
   } else if (cont->IsDeoptimize()) {
-    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->reason(),
-                             cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+    selector->EmitDeoptimize(opcode, g.NoOutput(), left, right, cont->kind(),
+                             cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(opcode, g.DefineAsByteRegister(cont->result()), left, right);
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(opcode, g.NoOutput(), left, right,
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1256,21 +1279,54 @@
   VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
 }
 
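+// Returns the MachineType a comparison operand can be narrowed to: a
+// constant adopts the representation of the load it is compared against
+// when its value fits in that representation, a load keeps its own
+// representation, and any other operand yields MachineType::None().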
+MachineType MachineTypeForNarrow(Node* node, Node* hint_node) {
+  if (hint_node->opcode() == IrOpcode::kLoad) {
+    MachineType hint = LoadRepresentationOf(hint_node->op());
+    if (node->opcode() == IrOpcode::kInt32Constant ||
+        node->opcode() == IrOpcode::kInt64Constant) {
+      int64_t constant = node->opcode() == IrOpcode::kInt32Constant
+                             ? OpParameter<int32_t>(node)
+                             : OpParameter<int64_t>(node);
+      if (hint == MachineType::Int8()) {
+        if (constant >= std::numeric_limits<int8_t>::min() &&
+            constant <= std::numeric_limits<int8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint8()) {
+        if (constant >= std::numeric_limits<uint8_t>::min() &&
+            constant <= std::numeric_limits<uint8_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int16()) {
+        if (constant >= std::numeric_limits<int16_t>::min() &&
+            constant <= std::numeric_limits<int16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Uint16()) {
+        if (constant >= std::numeric_limits<uint16_t>::min() &&
+            constant <= std::numeric_limits<uint16_t>::max()) {
+          return hint;
+        }
+      } else if (hint == MachineType::Int32()) {
+        return hint;
+      } else if (hint == MachineType::Uint32()) {
+        if (constant >= 0) return hint;
+      }
+    }
+  }
+  return node->opcode() == IrOpcode::kLoad ? LoadRepresentationOf(node->op())
+                                           : MachineType::None();
+}
+
 // Tries to match the size of the given opcode to that of the operands, if
 // possible.
 InstructionCode TryNarrowOpcodeSize(InstructionCode opcode, Node* left,
                                     Node* right, FlagsContinuation* cont) {
-  // Currently, if one of the two operands is not a Load, we don't know what its
-  // machine representation is, so we bail out.
-  // TODO(epertoso): we can probably get some size information out of immediates
-  // and phi nodes.
-  if (left->opcode() != IrOpcode::kLoad || right->opcode() != IrOpcode::kLoad) {
-    return opcode;
-  }
+  // TODO(epertoso): we can probably get some size information out of phi nodes.
   // If the load representations don't match, both operands will be
   // zero/sign-extended to 32bit.
-  MachineType left_type = LoadRepresentationOf(left->op());
-  MachineType right_type = LoadRepresentationOf(right->op());
+  MachineType left_type = MachineTypeForNarrow(left, right);
+  MachineType right_type = MachineTypeForNarrow(right, left);
   if (left_type == right_type) {
     switch (left_type.representation()) {
       case MachineRepresentation::kBit:
@@ -1316,11 +1372,14 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float32Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(cont->Encode(kX87Float32Cmp),
                    g.DefineAsByteRegister(cont->result()));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kX87Float32Cmp), g.NoOutput(),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1337,11 +1396,14 @@
   } else if (cont->IsDeoptimize()) {
     selector->EmitDeoptimize(cont->Encode(kX87Float64Cmp), g.NoOutput(),
                              g.Use(node->InputAt(0)), g.Use(node->InputAt(1)),
-                             cont->reason(), cont->frame_state());
-  } else {
-    DCHECK(cont->IsSet());
+                             cont->kind(), cont->reason(), cont->frame_state());
+  } else if (cont->IsSet()) {
     selector->Emit(cont->Encode(kX87Float64Cmp),
                    g.DefineAsByteRegister(cont->result()));
+  } else {
+    DCHECK(cont->IsTrap());
+    selector->Emit(cont->Encode(kX87Float64Cmp), g.NoOutput(),
+                   g.UseImmediate(cont->trap_id()));
   }
 }
 
@@ -1372,10 +1434,8 @@
 
   // Match immediates on right side of comparison.
   if (g.CanBeImmediate(right)) {
-    if (g.CanBeMemoryOperand(opcode, node, left, effect_level)) {
-      // TODO(epertoso): we should use `narrowed_opcode' here once we match
-      // immediates too.
-      return VisitCompareWithMemoryOperand(selector, opcode, left,
+    if (g.CanBeMemoryOperand(narrowed_opcode, node, left, effect_level)) {
+      return VisitCompareWithMemoryOperand(selector, narrowed_opcode, left,
                                            g.UseImmediate(right), cont);
     }
     return VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right),
@@ -1417,8 +1477,8 @@
         selector->Emit(opcode, g.NoOutput(), g.Label(cont->true_block()),
                        g.Label(cont->false_block()));
       } else if (cont->IsDeoptimize()) {
-        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->reason(),
-                                 cont->frame_state());
+        selector->EmitDeoptimize(opcode, 0, nullptr, 0, nullptr, cont->kind(),
+                                 cont->reason(), cont->frame_state());
       } else {
         DCHECK(cont->IsSet());
         selector->Emit(opcode, g.DefineAsRegister(cont->result()));
@@ -1531,14 +1591,29 @@
 }
 
 void InstructionSelector::VisitDeoptimizeIf(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kNotEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kNotEqual, p.kind(), p.reason(), node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }
 
 void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
+  DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
   FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
-      kEqual, DeoptimizeReasonOf(node->op()), node->InputAt(1));
+      kEqual, p.kind(), p.reason(), node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
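+// Wasm trap continuations: instead of deoptimizing, control branches to a
+// call of the runtime trap handler identified by func_id.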
+void InstructionSelector::VisitTrapIf(Node* node, Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kNotEqual, func_id, node->InputAt(1));
+  VisitWordCompareZero(this, node, node->InputAt(0), &cont);
+}
+
+void InstructionSelector::VisitTrapUnless(Node* node,
+                                          Runtime::FunctionId func_id) {
+  FlagsContinuation cont =
+      FlagsContinuation::ForTrap(kEqual, func_id, node->InputAt(1));
   VisitWordCompareZero(this, node, node->InputAt(0), &cont);
 }