Merge V8 5.8.283.32

Test: Build V8 for arm, arm64, x86, x86_64, mips, mips64 and
set a PAC script from the UI on bullhead

Change-Id: I7cc773b5daca34d869e768a1deebae3876f2dfac
diff --git a/src/ic/access-compiler.cc b/src/ic/access-compiler.cc
index d92f9c0..d210ea8 100644
--- a/src/ic/access-compiler.cc
+++ b/src/ic/access-compiler.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/accessor-assembler.cc b/src/ic/accessor-assembler.cc
new file mode 100644
index 0000000..d3379ab
--- /dev/null
+++ b/src/ic/accessor-assembler.cc
@@ -0,0 +1,2024 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/accessor-assembler.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/counters.h"
+#include "src/ic/handler-configuration.h"
+#include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+using compiler::CodeAssemblerState;
+using compiler::Node;
+
+//////////////////// Private helpers.
+
+Node* AccessorAssembler::TryMonomorphicCase(Node* slot, Node* vector,
+                                            Node* receiver_map,
+                                            Label* if_handler,
+                                            Variable* var_handler,
+                                            Label* if_miss) {
+  Comment("TryMonomorphicCase");
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // TODO(ishell): add helper class that hides offset computations for a series
+  // of loads.
+  int32_t header_size = FixedArray::kHeaderSize - kHeapObjectTag;
+  // Adding |header_size| with a separate IntPtrAdd rather than passing it
+  // into ElementOffsetFromIndex() allows it to be folded into a single
+  // [base, index, offset] indirect memory access on x64.
+  Node* offset =
+      ElementOffsetFromIndex(slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS);
+  Node* feedback = Load(MachineType::AnyTagged(), vector,
+                        IntPtrAdd(offset, IntPtrConstant(header_size)));
+
+  // Try to quickly handle the monomorphic case without knowing for sure
+  // if we have a weak cell in feedback. We do know it's safe to look
+  // at WeakCell::kValueOffset.
+  GotoIf(WordNotEqual(receiver_map, LoadWeakCellValueUnchecked(feedback)),
+         if_miss);
+
+  Node* handler =
+      Load(MachineType::AnyTagged(), vector,
+           IntPtrAdd(offset, IntPtrConstant(header_size + kPointerSize)));
+
+  var_handler->Bind(handler);
+  Goto(if_handler);
+  return feedback;
+}
+
+void AccessorAssembler::HandlePolymorphicCase(Node* receiver_map,
+                                              Node* feedback, Label* if_handler,
+                                              Variable* var_handler,
+                                              Label* if_miss,
+                                              int unroll_count) {
+  Comment("HandlePolymorphicCase");
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+
+  // Iterate {feedback} array.
+  const int kEntrySize = 2;
+
+  for (int i = 0; i < unroll_count; i++) {
+    Label next_entry(this);
+    Node* cached_map =
+        LoadWeakCellValue(LoadFixedArrayElement(feedback, i * kEntrySize));
+    GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+    // Found, now call handler.
+    Node* handler = LoadFixedArrayElement(feedback, i * kEntrySize + 1);
+    var_handler->Bind(handler);
+    Goto(if_handler);
+
+    Bind(&next_entry);
+  }
+
+  // Loop from {unroll_count}*kEntrySize to {length}.
+  Node* init = IntPtrConstant(unroll_count * kEntrySize);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(
+      init, length,
+      [this, receiver_map, feedback, if_handler, var_handler](Node* index) {
+        Node* cached_map =
+            LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+
+        Label next_entry(this);
+        GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+        // Found, now call handler.
+        Node* handler = LoadFixedArrayElement(feedback, index, kPointerSize);
+        var_handler->Bind(handler);
+        Goto(if_handler);
+
+        Bind(&next_entry);
+      },
+      kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
+}
+
+void AccessorAssembler::HandleKeyedStorePolymorphicCase(
+    Node* receiver_map, Node* feedback, Label* if_handler,
+    Variable* var_handler, Label* if_transition_handler,
+    Variable* var_transition_map_cell, Label* if_miss) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_handler->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_transition_map_cell->rep());
+
+  const int kEntrySize = 3;
+
+  Node* init = IntPtrConstant(0);
+  Node* length = LoadAndUntagFixedArrayBaseLength(feedback);
+  BuildFastLoop(init, length,
+                [this, receiver_map, feedback, if_handler, var_handler,
+                 if_transition_handler, var_transition_map_cell](Node* index) {
+                  Node* cached_map =
+                      LoadWeakCellValue(LoadFixedArrayElement(feedback, index));
+                  Label next_entry(this);
+                  GotoIf(WordNotEqual(receiver_map, cached_map), &next_entry);
+
+                  Node* maybe_transition_map_cell =
+                      LoadFixedArrayElement(feedback, index, kPointerSize);
+
+                  var_handler->Bind(
+                      LoadFixedArrayElement(feedback, index, 2 * kPointerSize));
+                  GotoIf(WordEqual(maybe_transition_map_cell,
+                                   LoadRoot(Heap::kUndefinedValueRootIndex)),
+                         if_handler);
+                  var_transition_map_cell->Bind(maybe_transition_map_cell);
+                  Goto(if_transition_handler);
+
+                  Bind(&next_entry);
+                },
+                kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+  // The loop falls through if no handler was found.
+  Goto(if_miss);
+}
+
+void AccessorAssembler::HandleLoadICHandlerCase(
+    const LoadICParameters* p, Node* handler, Label* miss,
+    ElementSupport support_elements) {
+  Comment("have_handler");
+  ExitPoint direct_exit(this);
+
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  var_holder.Bind(p->receiver);
+  Variable var_smi_handler(this, MachineRepresentation::kTagged);
+  var_smi_handler.Bind(handler);
+
+  Variable* vars[] = {&var_holder, &var_smi_handler};
+  Label if_smi_handler(this, 2, vars);
+  Label try_proto_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &try_proto_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
+  // for the encoding format.
+  Bind(&if_smi_handler);
+  {
+    HandleLoadICSmiHandlerCase(p, var_holder.value(), var_smi_handler.value(),
+                               miss, &direct_exit, support_elements);
+  }
+
+  Bind(&try_proto_handler);
+  {
+    GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+    HandleLoadICProtoHandlerCase(p, handler, &var_holder, &var_smi_handler,
+                                 &if_smi_handler, miss, &direct_exit, false);
+  }
+
+  Bind(&call_handler);
+  {
+    typedef LoadWithVectorDescriptor Descriptor;
+    TailCallStub(Descriptor(isolate()), handler, p->context, p->receiver,
+                 p->name, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::HandleLoadICSmiHandlerCase(
+    const LoadICParameters* p, Node* holder, Node* smi_handler, Label* miss,
+    ExitPoint* exit_point, ElementSupport support_elements) {
+  Variable var_double_value(this, MachineRepresentation::kFloat64);
+  Label rebox_double(this, &var_double_value);
+
+  Node* handler_word = SmiUntag(smi_handler);
+  Node* handler_kind = DecodeWord<LoadHandler::KindBits>(handler_word);
+  if (support_elements == kSupportElements) {
+    Label property(this);
+    GotoIfNot(
+        WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForElements)),
+        &property);
+
+    Comment("element_load");
+    Node* intptr_index = TryToIntptr(p->name, miss);
+    Node* elements = LoadElements(holder);
+    Node* is_jsarray_condition =
+        IsSetWord<LoadHandler::IsJsArrayBits>(handler_word);
+    Node* elements_kind =
+        DecodeWord32FromWord<LoadHandler::ElementsKindBits>(handler_word);
+    Label if_hole(this), unimplemented_elements_kind(this);
+    Label* out_of_bounds = miss;
+    EmitElementLoad(holder, elements, elements_kind, intptr_index,
+                    is_jsarray_condition, &if_hole, &rebox_double,
+                    &var_double_value, &unimplemented_elements_kind,
+                    out_of_bounds, miss, exit_point);
+
+    Bind(&unimplemented_elements_kind);
+    {
+      // Smi handlers should only be installed for supported elements kinds.
+      // Crash if we get here.
+      DebugBreak();
+      Goto(miss);
+    }
+
+    Bind(&if_hole);
+    {
+      Comment("convert hole");
+      GotoIfNot(IsSetWord<LoadHandler::ConvertHoleBits>(handler_word), miss);
+      Node* protector_cell = LoadRoot(Heap::kArrayProtectorRootIndex);
+      DCHECK(isolate()->heap()->array_protector()->IsPropertyCell());
+      GotoIfNot(
+          WordEqual(LoadObjectField(protector_cell, PropertyCell::kValueOffset),
+                    SmiConstant(Smi::FromInt(Isolate::kProtectorValid))),
+          miss);
+      exit_point->Return(UndefinedConstant());
+    }
+
+    Bind(&property);
+    Comment("property_load");
+  }
+
+  Label constant(this), field(this);
+  Branch(WordEqual(handler_kind, IntPtrConstant(LoadHandler::kForFields)),
+         &field, &constant);
+
+  Bind(&field);
+  {
+    Comment("field_load");
+    Node* offset = DecodeWord<LoadHandler::FieldOffsetBits>(handler_word);
+
+    Label inobject(this), out_of_object(this);
+    Branch(IsSetWord<LoadHandler::IsInobjectBits>(handler_word), &inobject,
+           &out_of_object);
+
+    Bind(&inobject);
+    {
+      Label is_double(this);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      exit_point->Return(LoadObjectField(holder, offset));
+
+      Bind(&is_double);
+      if (FLAG_unbox_double_fields) {
+        var_double_value.Bind(
+            LoadObjectField(holder, offset, MachineType::Float64()));
+      } else {
+        Node* mutable_heap_number = LoadObjectField(holder, offset);
+        var_double_value.Bind(LoadHeapNumberValue(mutable_heap_number));
+      }
+      Goto(&rebox_double);
+    }
+
+    Bind(&out_of_object);
+    {
+      Label is_double(this);
+      Node* properties = LoadProperties(holder);
+      Node* value = LoadObjectField(properties, offset);
+      GotoIf(IsSetWord<LoadHandler::IsDoubleBits>(handler_word), &is_double);
+      exit_point->Return(value);
+
+      Bind(&is_double);
+      var_double_value.Bind(LoadHeapNumberValue(value));
+      Goto(&rebox_double);
+    }
+
+    Bind(&rebox_double);
+    exit_point->Return(AllocateHeapNumberWithValue(var_double_value.value()));
+  }
+
+  Bind(&constant);
+  {
+    Comment("constant_load");
+    Node* descriptors = LoadMapDescriptors(LoadMap(holder));
+    Node* descriptor =
+        DecodeWord<LoadHandler::DescriptorValueIndexBits>(handler_word);
+    CSA_ASSERT(this,
+               UintPtrLessThan(descriptor,
+                               LoadAndUntagFixedArrayBaseLength(descriptors)));
+    Node* value = LoadFixedArrayElement(descriptors, descriptor);
+
+    Label if_accessor_info(this);
+    GotoIf(IsSetWord<LoadHandler::IsAccessorInfoBits>(handler_word),
+           &if_accessor_info);
+    exit_point->Return(value);
+
+    Bind(&if_accessor_info);
+    Callable callable = CodeFactory::ApiGetter(isolate());
+    exit_point->ReturnCallStub(callable, p->context, p->receiver, holder,
+                               value);
+  }
+}
+
+void AccessorAssembler::HandleLoadICProtoHandlerCase(
+    const LoadICParameters* p, Node* handler, Variable* var_holder,
+    Variable* var_smi_handler, Label* if_smi_handler, Label* miss,
+    ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
+  DCHECK_EQ(MachineRepresentation::kTagged, var_holder->rep());
+  DCHECK_EQ(MachineRepresentation::kTagged, var_smi_handler->rep());
+
+  // IC dispatchers rely on these assumptions to be held.
+  STATIC_ASSERT(FixedArray::kLengthOffset == LoadHandler::kHolderCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kSmiHandlerIndex),
+            LoadHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(LoadHandler::kValidityCellIndex),
+            LoadHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, LoadHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Label check_prototypes(this);
+  GotoIfNot(
+      IsSetWord<LoadHandler::DoNegativeLookupOnReceiverBits>(handler_flags),
+      &check_prototypes);
+  {
+    CSA_ASSERT(this, Word32BinaryNot(
+                         HasInstanceType(p->receiver, JS_GLOBAL_OBJECT_TYPE)));
+    // We have a dictionary receiver, do a negative lookup check.
+    NameDictionaryNegativeLookup(p->receiver, p->name, miss);
+    Goto(&check_prototypes);
+  }
+
+  Bind(&check_prototypes);
+  Node* maybe_holder_cell =
+      LoadObjectField(handler, LoadHandler::kHolderCellOffset);
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_holder_cell), &array_handler, &tuple_handler);
+
+  Bind(&tuple_handler);
+  {
+    Label load_existent(this);
+    GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+    // This is a handler for a load of a non-existent value.
+    if (throw_reference_error_if_nonexistent) {
+      exit_point->ReturnCallRuntime(Runtime::kThrowReferenceError, p->context,
+                                    p->name);
+    } else {
+      exit_point->Return(UndefinedConstant());
+    }
+
+    Bind(&load_existent);
+    Node* holder = LoadWeakCellValue(maybe_holder_cell);
+    // The |holder| is guaranteed to be alive at this point since we passed
+    // both the receiver map check and the validity cell check.
+    CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+
+    var_holder->Bind(holder);
+    var_smi_handler->Bind(smi_handler);
+    Goto(if_smi_handler);
+  }
+
+  Bind(&array_handler);
+  {
+    exit_point->ReturnCallStub(
+        CodeFactory::LoadICProtoArray(isolate(),
+                                      throw_reference_error_if_nonexistent),
+        p->context, p->receiver, p->name, p->slot, p->vector, handler);
+  }
+}
+
+Node* AccessorAssembler::EmitLoadICProtoArrayCheck(
+    const LoadICParameters* p, Node* handler, Node* handler_length,
+    Node* handler_flags, Label* miss,
+    bool throw_reference_error_if_nonexistent) {
+  Variable start_index(this, MachineType::PointerRepresentation());
+  start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex));
+
+  Label can_access(this);
+  GotoIfNot(IsSetWord<LoadHandler::DoAccessCheckOnReceiverBits>(handler_flags),
+            &can_access);
+  {
+    // Skip this entry of a handler.
+    start_index.Bind(IntPtrConstant(LoadHandler::kFirstPrototypeIndex + 1));
+
+    int offset =
+        FixedArray::OffsetOfElementAt(LoadHandler::kFirstPrototypeIndex);
+    Node* expected_native_context =
+        LoadWeakCellValue(LoadObjectField(handler, offset), miss);
+    CSA_ASSERT(this, IsNativeContext(expected_native_context));
+
+    Node* native_context = LoadNativeContext(p->context);
+    GotoIf(WordEqual(expected_native_context, native_context), &can_access);
+    // If the receiver is not a JSGlobalProxy then we miss.
+    GotoIfNot(IsJSGlobalProxy(p->receiver), miss);
+    // For JSGlobalProxy receiver try to compare security tokens of current
+    // and expected native contexts.
+    Node* expected_token = LoadContextElement(expected_native_context,
+                                              Context::SECURITY_TOKEN_INDEX);
+    Node* current_token =
+        LoadContextElement(native_context, Context::SECURITY_TOKEN_INDEX);
+    Branch(WordEqual(expected_token, current_token), &can_access, miss);
+  }
+  Bind(&can_access);
+
+  BuildFastLoop(start_index.value(), handler_length,
+                [this, p, handler, miss](Node* current) {
+                  Node* prototype_cell =
+                      LoadFixedArrayElement(handler, current);
+                  CheckPrototype(prototype_cell, p->name, miss);
+                },
+                1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+  Node* maybe_holder_cell =
+      LoadFixedArrayElement(handler, LoadHandler::kHolderCellIndex);
+  Label load_existent(this);
+  GotoIf(WordNotEqual(maybe_holder_cell, NullConstant()), &load_existent);
+  // This is a handler for a load of a non-existent value.
+  if (throw_reference_error_if_nonexistent) {
+    TailCallRuntime(Runtime::kThrowReferenceError, p->context, p->name);
+  } else {
+    Return(UndefinedConstant());
+  }
+
+  Bind(&load_existent);
+  Node* holder = LoadWeakCellValue(maybe_holder_cell);
+  // The |holder| is guaranteed to be alive at this point since we passed
+  // the receiver map check, the validity cell check and the prototype chain
+  // check.
+  CSA_ASSERT(this, WordNotEqual(holder, IntPtrConstant(0)));
+  return holder;
+}
+
+void AccessorAssembler::HandleLoadGlobalICHandlerCase(
+    const LoadICParameters* pp, Node* handler, Label* miss,
+    ExitPoint* exit_point, bool throw_reference_error_if_nonexistent) {
+  LoadICParameters p = *pp;
+  DCHECK_NULL(p.receiver);
+  Node* native_context = LoadNativeContext(p.context);
+  p.receiver = LoadContextElement(native_context, Context::EXTENSION_INDEX);
+
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  Variable var_smi_handler(this, MachineRepresentation::kTagged);
+  Label if_smi_handler(this);
+  HandleLoadICProtoHandlerCase(&p, handler, &var_holder, &var_smi_handler,
+                               &if_smi_handler, miss, exit_point,
+                               throw_reference_error_if_nonexistent);
+  Bind(&if_smi_handler);
+  HandleLoadICSmiHandlerCase(&p, var_holder.value(), var_smi_handler.value(),
+                             miss, exit_point, kOnlyProperties);
+}
+
+void AccessorAssembler::HandleStoreICHandlerCase(
+    const StoreICParameters* p, Node* handler, Label* miss,
+    ElementSupport support_elements) {
+  Label if_smi_handler(this), if_nonsmi_handler(this);
+  Label if_proto_handler(this), if_element_handler(this), call_handler(this);
+
+  Branch(TaggedIsSmi(handler), &if_smi_handler, &if_nonsmi_handler);
+
+  // |handler| is a Smi, encoding what to do. See SmiHandler methods
+  // for the encoding format.
+  Bind(&if_smi_handler);
+  {
+    Node* holder = p->receiver;
+    Node* handler_word = SmiUntag(handler);
+
+    // Handle non-transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, nullptr, miss);
+  }
+
+  Bind(&if_nonsmi_handler);
+  {
+    Node* handler_map = LoadMap(handler);
+    if (support_elements == kSupportElements) {
+      GotoIf(IsTuple2Map(handler_map), &if_element_handler);
+    }
+    Branch(IsCodeMap(handler_map), &call_handler, &if_proto_handler);
+  }
+
+  if (support_elements == kSupportElements) {
+    Bind(&if_element_handler);
+    { HandleStoreICElementHandlerCase(p, handler, miss); }
+  }
+
+  Bind(&if_proto_handler);
+  {
+    HandleStoreICProtoHandler(p, handler, miss);
+  }
+
+  // |handler| is a heap object. Must be code, call it.
+  Bind(&call_handler);
+  {
+    StoreWithVectorDescriptor descriptor(isolate());
+    TailCallStub(descriptor, handler, p->context, p->receiver, p->name,
+                 p->value, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::HandleStoreICElementHandlerCase(
+    const StoreICParameters* p, Node* handler, Label* miss) {
+  Comment("HandleStoreICElementHandlerCase");
+  Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+
+  Node* code_handler = LoadObjectField(handler, Tuple2::kValue2Offset);
+  CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+  StoreWithVectorDescriptor descriptor(isolate());
+  TailCallStub(descriptor, code_handler, p->context, p->receiver, p->name,
+               p->value, p->slot, p->vector);
+}
+
+void AccessorAssembler::HandleStoreICProtoHandler(const StoreICParameters* p,
+                                                  Node* handler, Label* miss) {
+  // IC dispatchers rely on these assumptions to be held.
+  STATIC_ASSERT(FixedArray::kLengthOffset ==
+                StoreHandler::kTransitionCellOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kSmiHandlerIndex),
+            StoreHandler::kSmiHandlerOffset);
+  DCHECK_EQ(FixedArray::OffsetOfElementAt(StoreHandler::kValidityCellIndex),
+            StoreHandler::kValidityCellOffset);
+
+  // Both FixedArray and Tuple3 handlers have validity cell at the same offset.
+  Label validity_cell_check_done(this);
+  Node* validity_cell =
+      LoadObjectField(handler, StoreHandler::kValidityCellOffset);
+  GotoIf(WordEqual(validity_cell, IntPtrConstant(0)),
+         &validity_cell_check_done);
+  Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+  GotoIf(WordNotEqual(cell_value,
+                      SmiConstant(Smi::FromInt(Map::kPrototypeChainValid))),
+         miss);
+  Goto(&validity_cell_check_done);
+
+  Bind(&validity_cell_check_done);
+  Node* smi_handler = LoadObjectField(handler, StoreHandler::kSmiHandlerOffset);
+  CSA_ASSERT(this, TaggedIsSmi(smi_handler));
+
+  Node* maybe_transition_cell =
+      LoadObjectField(handler, StoreHandler::kTransitionCellOffset);
+  Label array_handler(this), tuple_handler(this);
+  Branch(TaggedIsSmi(maybe_transition_cell), &array_handler, &tuple_handler);
+
+  Variable var_transition(this, MachineRepresentation::kTagged);
+  Label if_transition(this), if_transition_to_constant(this);
+  Bind(&tuple_handler);
+  {
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&array_handler);
+  {
+    Node* length = SmiUntag(maybe_transition_cell);
+    BuildFastLoop(IntPtrConstant(StoreHandler::kFirstPrototypeIndex), length,
+                  [this, p, handler, miss](Node* current) {
+                    Node* prototype_cell =
+                        LoadFixedArrayElement(handler, current);
+                    CheckPrototype(prototype_cell, p->name, miss);
+                  },
+                  1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+
+    Node* maybe_transition_cell =
+        LoadFixedArrayElement(handler, StoreHandler::kTransitionCellIndex);
+    Node* transition = LoadWeakCellValue(maybe_transition_cell, miss);
+    var_transition.Bind(transition);
+    Goto(&if_transition);
+  }
+
+  Bind(&if_transition);
+  {
+    Node* holder = p->receiver;
+    Node* transition = var_transition.value();
+    Node* handler_word = SmiUntag(smi_handler);
+
+    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(transition)), miss);
+
+    Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+    GotoIf(WordEqual(handler_kind,
+                     IntPtrConstant(StoreHandler::kTransitionToConstant)),
+           &if_transition_to_constant);
+
+    // Handle transitioning field stores.
+    HandleStoreICSmiHandlerCase(handler_word, holder, p->value, transition,
+                                miss);
+
+    Bind(&if_transition_to_constant);
+    {
+      // Check that constant matches value.
+      Node* value_index_in_descriptor =
+          DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+      Node* descriptors = LoadMapDescriptors(transition);
+      Node* constant =
+          LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+      GotoIf(WordNotEqual(p->value, constant), miss);
+
+      StoreMap(p->receiver, transition);
+      Return(p->value);
+    }
+  }
+}
+
+void AccessorAssembler::HandleStoreICSmiHandlerCase(Node* handler_word,
+                                                    Node* holder, Node* value,
+                                                    Node* transition,
+                                                    Label* miss) {
+  Comment(transition ? "transitioning field store" : "field store");
+
+#ifdef DEBUG
+  Node* handler_kind = DecodeWord<StoreHandler::KindBits>(handler_word);
+  if (transition) {
+    CSA_ASSERT(
+        this,
+        Word32Or(
+            WordEqual(handler_kind,
+                      IntPtrConstant(StoreHandler::kTransitionToField)),
+            WordEqual(handler_kind,
+                      IntPtrConstant(StoreHandler::kTransitionToConstant))));
+  } else {
+    if (FLAG_track_constant_fields) {
+      CSA_ASSERT(
+          this,
+          Word32Or(WordEqual(handler_kind,
+                             IntPtrConstant(StoreHandler::kStoreField)),
+                   WordEqual(handler_kind,
+                             IntPtrConstant(StoreHandler::kStoreConstField))));
+    } else {
+      CSA_ASSERT(this, WordEqual(handler_kind,
+                                 IntPtrConstant(StoreHandler::kStoreField)));
+    }
+  }
+#endif
+
+  Node* field_representation =
+      DecodeWord<StoreHandler::FieldRepresentationBits>(handler_word);
+
+  Label if_smi_field(this), if_double_field(this), if_heap_object_field(this),
+      if_tagged_field(this);
+
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kTagged)),
+         &if_tagged_field);
+  GotoIf(WordEqual(field_representation,
+                   IntPtrConstant(StoreHandler::kHeapObject)),
+         &if_heap_object_field);
+  GotoIf(WordEqual(field_representation, IntPtrConstant(StoreHandler::kDouble)),
+         &if_double_field);
+  CSA_ASSERT(this, WordEqual(field_representation,
+                             IntPtrConstant(StoreHandler::kSmi)));
+  Goto(&if_smi_field);
+
+  Bind(&if_tagged_field);
+  {
+    Comment("store tagged field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Tagged(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_double_field);
+  {
+    Comment("store double field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Double(),
+                              value, transition, miss);
+  }
+
+  Bind(&if_heap_object_field);
+  {
+    Comment("store heap object field");
+    HandleStoreFieldAndReturn(handler_word, holder,
+                              Representation::HeapObject(), value, transition,
+                              miss);
+  }
+
+  Bind(&if_smi_field);
+  {
+    Comment("store smi field");
+    HandleStoreFieldAndReturn(handler_word, holder, Representation::Smi(),
+                              value, transition, miss);
+  }
+}
+
+void AccessorAssembler::HandleStoreFieldAndReturn(Node* handler_word,
+                                                  Node* holder,
+                                                  Representation representation,
+                                                  Node* value, Node* transition,
+                                                  Label* miss) {
+  bool transition_to_field = transition != nullptr;
+  Node* prepared_value = PrepareValueForStore(
+      handler_word, holder, representation, transition, value, miss);
+
+  Label if_inobject(this), if_out_of_object(this);
+  Branch(IsSetWord<StoreHandler::IsInobjectBits>(handler_word), &if_inobject,
+         &if_out_of_object);
+
+  Bind(&if_inobject);
+  {
+    StoreNamedField(handler_word, holder, true, representation, prepared_value,
+                    transition_to_field, miss);
+    if (transition_to_field) {
+      StoreMap(holder, transition);
+    }
+    Return(value);
+  }
+
+  Bind(&if_out_of_object);
+  {
+    if (transition_to_field) {
+      Label storage_extended(this);
+      GotoIfNot(IsSetWord<StoreHandler::ExtendStorageBits>(handler_word),
+                &storage_extended);
+      Comment("[ Extend storage");
+      ExtendPropertiesBackingStore(holder);
+      Comment("] Extend storage");
+      Goto(&storage_extended);
+
+      Bind(&storage_extended);
+    }
+
+    StoreNamedField(handler_word, holder, false, representation, prepared_value,
+                    transition_to_field, miss);
+    if (transition_to_field) {
+      StoreMap(holder, transition);
+    }
+    Return(value);
+  }
+}
+
+Node* AccessorAssembler::PrepareValueForStore(Node* handler_word, Node* holder,
+                                              Representation representation,
+                                              Node* transition, Node* value,
+                                              Label* bailout) {
+  if (representation.IsDouble()) {
+    value = TryTaggedToFloat64(value, bailout);
+
+  } else if (representation.IsHeapObject()) {
+    GotoIf(TaggedIsSmi(value), bailout);
+
+    Label done(this);
+    if (FLAG_track_constant_fields && !transition) {
+      // Skip field type check in favor of constant value check when storing
+      // to constant field.
+      GotoIf(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+                       IntPtrConstant(StoreHandler::kStoreConstField)),
+             &done);
+    }
+    Node* value_index_in_descriptor =
+        DecodeWord<StoreHandler::DescriptorValueIndexBits>(handler_word);
+    Node* descriptors =
+        LoadMapDescriptors(transition ? transition : LoadMap(holder));
+    Node* maybe_field_type =
+        LoadFixedArrayElement(descriptors, value_index_in_descriptor);
+
+    GotoIf(TaggedIsSmi(maybe_field_type), &done);
+    // Check that value type matches the field type.
+    {
+      Node* field_type = LoadWeakCellValue(maybe_field_type, bailout);
+      Branch(WordEqual(LoadMap(value), field_type), &done, bailout);
+    }
+    Bind(&done);
+
+  } else if (representation.IsSmi()) {
+    GotoIfNot(TaggedIsSmi(value), bailout);
+
+  } else {
+    DCHECK(representation.IsTagged());
+  }
+  return value;
+}
+
+// Replaces |object|'s out-of-object properties backing store with a copy
+// that is JSObject::kFieldsAdded slots larger; the added slots are filled
+// with undefined. The new store is small enough to be allocated in new
+// space, which lets the element copy skip the write barrier.
+void AccessorAssembler::ExtendPropertiesBackingStore(Node* object) {
+  Node* properties = LoadProperties(object);
+  Node* length = LoadFixedArrayBaseLength(properties);
+
+  ParameterMode mode = OptimalParameterMode();
+  length = TaggedToParameter(length, mode);
+
+  Node* delta = IntPtrOrSmiConstant(JSObject::kFieldsAdded, mode);
+  Node* new_capacity = IntPtrOrSmiAdd(length, delta, mode);
+
+  // Grow properties array.
+  ElementsKind kind = FAST_ELEMENTS;
+  DCHECK(kMaxNumberOfDescriptors + JSObject::kFieldsAdded <
+         FixedArrayBase::GetMaxLengthForNewSpaceAllocation(kind));
+  // The size of a new properties backing store is guaranteed to be small
+  // enough that the new backing store will be allocated in new space.
+  CSA_ASSERT(this,
+             UintPtrOrSmiLessThan(
+                 new_capacity,
+                 IntPtrOrSmiConstant(
+                     kMaxNumberOfDescriptors + JSObject::kFieldsAdded, mode),
+                 mode));
+
+  Node* new_properties = AllocateFixedArray(kind, new_capacity, mode);
+
+  // Only the freshly added tail [length, new_capacity) needs filling; the
+  // lower slots are overwritten by the copy below.
+  FillFixedArrayWithValue(kind, new_properties, length, new_capacity,
+                          Heap::kUndefinedValueRootIndex, mode);
+
+  // |new_properties| is guaranteed to be in new space, so we can skip
+  // the write barrier.
+  CopyFixedArrayElements(kind, properties, new_properties, length,
+                         SKIP_WRITE_BARRIER, mode);
+
+  StoreObjectField(object, JSObject::kPropertiesOffset, new_properties);
+}
+
+// Stores |value| into the field described by |handler_word|, located either
+// in-object (|is_inobject|) or in the properties backing store. Double
+// fields are stored unboxed only when FLAG_unbox_double_fields allows it and
+// the field is in-object; otherwise the double lives in a (mutable)
+// HeapNumber. With FLAG_track_constant_fields, a non-transitioning store to
+// a constant field jumps to |bailout| unless the new value equals the
+// currently stored one.
+void AccessorAssembler::StoreNamedField(Node* handler_word, Node* object,
+                                        bool is_inobject,
+                                        Representation representation,
+                                        Node* value, bool transition_to_field,
+                                        Label* bailout) {
+  bool store_value_as_double = representation.IsDouble();
+  Node* property_storage = object;
+  if (!is_inobject) {
+    property_storage = LoadProperties(object);
+  }
+
+  Node* offset = DecodeWord<StoreHandler::FieldOffsetBits>(handler_word);
+  if (representation.IsDouble()) {
+    if (!FLAG_unbox_double_fields || !is_inobject) {
+      if (transition_to_field) {
+        // A transition introduces the field, so box the double in a fresh
+        // mutable HeapNumber and store that as a tagged value instead.
+        Node* heap_number = AllocateHeapNumberWithValue(value, MUTABLE);
+        // Store the new mutable heap number into the object.
+        value = heap_number;
+        store_value_as_double = false;
+      } else {
+        // The field already holds a HeapNumber; redirect the store into its
+        // value slot.
+        // Load the heap number.
+        property_storage = LoadObjectField(property_storage, offset);
+        // Store the double value into it.
+        offset = IntPtrConstant(HeapNumber::kValueOffset);
+      }
+    }
+  }
+
+  // Do constant value check if necessary.
+  if (FLAG_track_constant_fields && !transition_to_field) {
+    Label done(this);
+    GotoIfNot(WordEqual(DecodeWord<StoreHandler::KindBits>(handler_word),
+                        IntPtrConstant(StoreHandler::kStoreConstField)),
+              &done);
+    {
+      // Storing a different value into a constant field is not allowed here;
+      // let the bailout path deal with it.
+      if (store_value_as_double) {
+        Node* current_value =
+            LoadObjectField(property_storage, offset, MachineType::Float64());
+        GotoIfNot(Float64Equal(current_value, value), bailout);
+      } else {
+        Node* current_value = LoadObjectField(property_storage, offset);
+        GotoIfNot(WordEqual(current_value, value), bailout);
+      }
+      Goto(&done);
+    }
+    Bind(&done);
+  }
+
+  // Do the store.
+  if (store_value_as_double) {
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value,
+                                   MachineRepresentation::kFloat64);
+  } else if (representation.IsSmi()) {
+    // Smis never need a write barrier.
+    StoreObjectFieldNoWriteBarrier(property_storage, offset, value);
+  } else {
+    StoreObjectField(property_storage, offset, value);
+  }
+}
+
+// Jumps to |miss| unless |intptr_index| is within bounds: for JSArrays
+// (per |is_jsarray_condition|) the bound is the array length, otherwise it
+// is the backing FixedArray's length.
+void AccessorAssembler::EmitFastElementsBoundsCheck(Node* object,
+                                                    Node* elements,
+                                                    Node* intptr_index,
+                                                    Node* is_jsarray_condition,
+                                                    Label* miss) {
+  Variable var_length(this, MachineType::PointerRepresentation());
+  Comment("Fast elements bounds check");
+  Label if_array(this), length_loaded(this, &var_length);
+  GotoIf(is_jsarray_condition, &if_array);
+  {
+    var_length.Bind(SmiUntag(LoadFixedArrayBaseLength(elements)));
+    Goto(&length_loaded);
+  }
+  Bind(&if_array);
+  {
+    var_length.Bind(SmiUntag(LoadJSArrayLength(object)));
+    Goto(&length_loaded);
+  }
+  Bind(&length_loaded);
+  // Unsigned comparison also rejects negative indices.
+  GotoIfNot(UintPtrLessThan(intptr_index, var_length.value()), miss);
+}
+
+// Loads element |intptr_index| from |elements|, dispatching on
+// |elements_kind|. Fast (packed/holey, smi/object/double), dictionary and
+// fixed typed-array kinds are handled inline; any other kind jumps to
+// |unimplemented_elements_kind|. Double results are delivered unboxed
+// through |var_double_value| + |rebox_double| so the caller chooses how to
+// box them; holes go to |if_hole|, failed bounds checks to |out_of_bounds|,
+// and everything else that can't be handled here to |miss|.
+void AccessorAssembler::EmitElementLoad(
+    Node* object, Node* elements, Node* elements_kind, Node* intptr_index,
+    Node* is_jsarray_condition, Label* if_hole, Label* rebox_double,
+    Variable* var_double_value, Label* unimplemented_elements_kind,
+    Label* out_of_bounds, Label* miss, ExitPoint* exit_point) {
+  Label if_typed_array(this), if_fast_packed(this), if_fast_holey(this),
+      if_fast_double(this), if_fast_holey_double(this), if_nonfast(this),
+      if_dictionary(this);
+  GotoIf(
+      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
+      &if_nonfast);
+
+  // All fast kinds share the same bounds check; non-fast kinds do their own.
+  EmitFastElementsBoundsCheck(object, elements, intptr_index,
+                              is_jsarray_condition, out_of_bounds);
+  int32_t kinds[] = {// Handled by if_fast_packed.
+                     FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                     // Handled by if_fast_holey.
+                     FAST_HOLEY_SMI_ELEMENTS, FAST_HOLEY_ELEMENTS,
+                     // Handled by if_fast_double.
+                     FAST_DOUBLE_ELEMENTS,
+                     // Handled by if_fast_holey_double.
+                     FAST_HOLEY_DOUBLE_ELEMENTS};
+  Label* labels[] = {// FAST_{SMI,}_ELEMENTS
+                     &if_fast_packed, &if_fast_packed,
+                     // FAST_HOLEY_{SMI,}_ELEMENTS
+                     &if_fast_holey, &if_fast_holey,
+                     // FAST_DOUBLE_ELEMENTS
+                     &if_fast_double,
+                     // FAST_HOLEY_DOUBLE_ELEMENTS
+                     &if_fast_holey_double};
+  Switch(elements_kind, unimplemented_elements_kind, kinds, labels,
+         arraysize(kinds));
+
+  Bind(&if_fast_packed);
+  {
+    Comment("fast packed elements");
+    exit_point->Return(LoadFixedArrayElement(elements, intptr_index));
+  }
+
+  Bind(&if_fast_holey);
+  {
+    Comment("fast holey elements");
+    Node* element = LoadFixedArrayElement(elements, intptr_index);
+    GotoIf(WordEqual(element, TheHoleConstant()), if_hole);
+    exit_point->Return(element);
+  }
+
+  Bind(&if_fast_double);
+  {
+    Comment("packed double elements");
+    var_double_value->Bind(LoadFixedDoubleArrayElement(elements, intptr_index,
+                                                       MachineType::Float64()));
+    Goto(rebox_double);
+  }
+
+  Bind(&if_fast_holey_double);
+  {
+    Comment("holey double elements");
+    // LoadFixedDoubleArrayElement jumps to |if_hole| itself when the slot
+    // holds the hole NaN pattern.
+    Node* value = LoadFixedDoubleArrayElement(elements, intptr_index,
+                                              MachineType::Float64(), 0,
+                                              INTPTR_PARAMETERS, if_hole);
+    var_double_value->Bind(value);
+    Goto(rebox_double);
+  }
+
+  Bind(&if_nonfast);
+  {
+    STATIC_ASSERT(LAST_ELEMENTS_KIND == LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    GotoIf(Int32GreaterThanOrEqual(
+               elements_kind,
+               Int32Constant(FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND)),
+           &if_typed_array);
+    GotoIf(Word32Equal(elements_kind, Int32Constant(DICTIONARY_ELEMENTS)),
+           &if_dictionary);
+    Goto(unimplemented_elements_kind);
+  }
+
+  Bind(&if_dictionary);
+  {
+    Comment("dictionary elements");
+    GotoIf(IntPtrLessThan(intptr_index, IntPtrConstant(0)), out_of_bounds);
+    Variable var_entry(this, MachineType::PointerRepresentation());
+    Label if_found(this);
+    // Absent entries are treated like holes (prototype chain may supply
+    // the element).
+    NumberDictionaryLookup<SeededNumberDictionary>(
+        elements, intptr_index, &if_found, &var_entry, if_hole);
+    Bind(&if_found);
+    // Check that the value is a data property.
+    Node* index = EntryToIndex<SeededNumberDictionary>(var_entry.value());
+    Node* details =
+        LoadDetailsByKeyIndex<SeededNumberDictionary>(elements, index);
+    Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+    // TODO(jkummerow): Support accessors without missing?
+    GotoIfNot(Word32Equal(kind, Int32Constant(kData)), miss);
+    // Finally, load the value.
+    exit_point->Return(
+        LoadValueByKeyIndex<SeededNumberDictionary>(elements, index));
+  }
+
+  Bind(&if_typed_array);
+  {
+    Comment("typed elements");
+    // Check if buffer has been neutered.
+    Node* buffer = LoadObjectField(object, JSArrayBufferView::kBufferOffset);
+    GotoIf(IsDetachedBuffer(buffer), miss);
+
+    // Bounds check.
+    Node* length =
+        SmiUntag(LoadObjectField(object, JSTypedArray::kLengthOffset));
+    GotoIfNot(UintPtrLessThan(intptr_index, length), out_of_bounds);
+
+    // Backing store = external_pointer + base_pointer.
+    Node* external_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kExternalPointerOffset,
+                        MachineType::Pointer());
+    Node* base_pointer =
+        LoadObjectField(elements, FixedTypedArrayBase::kBasePointerOffset);
+    Node* backing_store =
+        IntPtrAdd(external_pointer, BitcastTaggedToWord(base_pointer));
+
+    Label uint8_elements(this), int8_elements(this), uint16_elements(this),
+        int16_elements(this), uint32_elements(this), int32_elements(this),
+        float32_elements(this), float64_elements(this);
+    Label* elements_kind_labels[] = {
+        &uint8_elements,  &uint8_elements,   &int8_elements,
+        &uint16_elements, &int16_elements,   &uint32_elements,
+        &int32_elements,  &float32_elements, &float64_elements};
+    int32_t elements_kinds[] = {
+        UINT8_ELEMENTS,  UINT8_CLAMPED_ELEMENTS, INT8_ELEMENTS,
+        UINT16_ELEMENTS, INT16_ELEMENTS,         UINT32_ELEMENTS,
+        INT32_ELEMENTS,  FLOAT32_ELEMENTS,       FLOAT64_ELEMENTS};
+    const size_t kTypedElementsKindCount =
+        LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND -
+        FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND + 1;
+    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kinds));
+    DCHECK_EQ(kTypedElementsKindCount, arraysize(elements_kind_labels));
+    Switch(elements_kind, miss, elements_kinds, elements_kind_labels,
+           kTypedElementsKindCount);
+    // In each case below, the index is scaled by the element size (shift by
+    // log2(size)) because |backing_store| is addressed in bytes.
+    Bind(&uint8_elements);
+    {
+      Comment("UINT8_ELEMENTS");  // Handles UINT8_CLAMPED_ELEMENTS too.
+      Node* element = Load(MachineType::Uint8(), backing_store, intptr_index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&int8_elements);
+    {
+      Comment("INT8_ELEMENTS");
+      Node* element = Load(MachineType::Int8(), backing_store, intptr_index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&uint16_elements);
+    {
+      Comment("UINT16_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
+      Node* element = Load(MachineType::Uint16(), backing_store, index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&int16_elements);
+    {
+      Comment("INT16_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(1));
+      Node* element = Load(MachineType::Int16(), backing_store, index);
+      exit_point->Return(SmiFromWord32(element));
+    }
+    Bind(&uint32_elements);
+    {
+      Comment("UINT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Uint32(), backing_store, index);
+      // A uint32 may exceed Smi range, so tag via the uint32 path.
+      exit_point->Return(ChangeUint32ToTagged(element));
+    }
+    Bind(&int32_elements);
+    {
+      Comment("INT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Int32(), backing_store, index);
+      exit_point->Return(ChangeInt32ToTagged(element));
+    }
+    Bind(&float32_elements);
+    {
+      Comment("FLOAT32_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(2));
+      Node* element = Load(MachineType::Float32(), backing_store, index);
+      var_double_value->Bind(ChangeFloat32ToFloat64(element));
+      Goto(rebox_double);
+    }
+    Bind(&float64_elements);
+    {
+      Comment("FLOAT64_ELEMENTS");
+      Node* index = WordShl(intptr_index, IntPtrConstant(3));
+      Node* element = Load(MachineType::Float64(), backing_store, index);
+      var_double_value->Bind(element);
+      Goto(rebox_double);
+    }
+  }
+}
+
+// Validates a prototype-chain entry recorded in |prototype_cell| for a
+// lookup of |name|. A cleared weak cell jumps to |miss|. Otherwise the cell
+// holds either a PropertyCell (which must still contain the hole, i.e. the
+// property is still absent) or a dictionary-mode prototype object (which
+// must not own |name|).
+void AccessorAssembler::CheckPrototype(Node* prototype_cell, Node* name,
+                                       Label* miss) {
+  Node* maybe_prototype = LoadWeakCellValue(prototype_cell, miss);
+
+  Label done(this);
+  Label if_property_cell(this), if_dictionary_object(this);
+
+  // |maybe_prototype| is either a PropertyCell or a slow-mode prototype.
+  Branch(WordEqual(LoadMap(maybe_prototype),
+                   LoadRoot(Heap::kGlobalPropertyCellMapRootIndex)),
+         &if_property_cell, &if_dictionary_object);
+
+  Bind(&if_dictionary_object);
+  {
+    CSA_ASSERT(this, IsDictionaryMap(LoadMap(maybe_prototype)));
+    NameDictionaryNegativeLookup(maybe_prototype, name, miss);
+    Goto(&done);
+  }
+
+  Bind(&if_property_cell);
+  {
+    // Ensure the property cell still contains the hole.
+    Node* value = LoadObjectField(maybe_prototype, PropertyCell::kValueOffset);
+    GotoIf(WordNotEqual(value, LoadRoot(Heap::kTheHoleValueRootIndex)), miss);
+    Goto(&done);
+  }
+
+  Bind(&done);
+}
+
+// Jumps to |miss| if the dictionary-mode |object| owns a property named
+// |name|; falls through when the name is absent.
+void AccessorAssembler::NameDictionaryNegativeLookup(Node* object, Node* name,
+                                                     Label* miss) {
+  CSA_ASSERT(this, IsDictionaryMap(LoadMap(object)));
+  Label if_not_found(this);
+  Variable var_entry_index(this, MachineType::PointerRepresentation());
+  Node* dictionary = LoadProperties(object);
+  // A successful lookup means the property exists, which is the miss case;
+  // only a failed lookup lets us proceed.
+  NameDictionaryLookup<NameDictionary>(dictionary, name, miss,
+                                       &var_entry_index, &if_not_found);
+  Bind(&if_not_found);
+}
+
+// Megamorphic keyed load for an integer |index|: loads the element from
+// |receiver| via EmitElementLoad, falling back to |slow| (runtime) for
+// receivers with non-standard element access or unimplemented kinds.
+// Out-of-bounds positive indices and holes return undefined when no
+// prototype on the chain has elements.
+void AccessorAssembler::GenericElementLoad(Node* receiver, Node* receiver_map,
+                                           Node* instance_type, Node* index,
+                                           Label* slow) {
+  Comment("integer index");
+
+  ExitPoint direct_exit(this);
+
+  Label if_element_hole(this), if_oob(this);
+  // Receivers requiring non-standard element accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
+         slow);
+  Node* elements = LoadElements(receiver);
+  Node* elements_kind = LoadMapElementsKind(receiver_map);
+  Node* is_jsarray_condition =
+      Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE));
+  Variable var_double_value(this, MachineRepresentation::kFloat64);
+  Label rebox_double(this, &var_double_value);
+
+  // Unimplemented elements kinds fall back to a runtime call.
+  Label* unimplemented_elements_kind = slow;
+  IncrementCounter(isolate()->counters()->ic_keyed_load_generic_smi(), 1);
+  EmitElementLoad(receiver, elements, elements_kind, index,
+                  is_jsarray_condition, &if_element_hole, &rebox_double,
+                  &var_double_value, unimplemented_elements_kind, &if_oob, slow,
+                  &direct_exit);
+
+  Bind(&rebox_double);
+  // Unboxed doubles from EmitElementLoad are boxed here, once.
+  Return(AllocateHeapNumberWithValue(var_double_value.value()));
+
+  Bind(&if_oob);
+  {
+    Comment("out of bounds");
+    // Negative keys can't take the fast OOB path.
+    GotoIf(IntPtrLessThan(index, IntPtrConstant(0)), slow);
+    // Positive OOB indices are effectively the same as hole loads.
+    Goto(&if_element_hole);
+  }
+
+  Bind(&if_element_hole);
+  {
+    Comment("found the hole");
+    Label return_undefined(this);
+    // If any prototype could contribute elements, the runtime must do the
+    // full prototype-chain lookup.
+    BranchIfPrototypesHaveNoElements(receiver_map, &return_undefined, slow);
+
+    Bind(&return_undefined);
+    Return(UndefinedConstant());
+  }
+}
+
+// Megamorphic keyed load for a unique-name |key|: tries, in order, the
+// receiver's own fast properties (descriptor lookup, then stub cache), its
+// property dictionary, and finally a walk up the prototype chain. Receivers
+// with non-standard access semantics go to |slow| (runtime).
+void AccessorAssembler::GenericPropertyLoad(Node* receiver, Node* receiver_map,
+                                            Node* instance_type, Node* key,
+                                            const LoadICParameters* p,
+                                            Label* slow) {
+  Comment("key is unique name");
+  Label if_found_on_receiver(this), if_property_dictionary(this),
+      lookup_prototype_chain(this);
+  Variable var_details(this, MachineRepresentation::kWord32);
+  Variable var_value(this, MachineRepresentation::kTagged);
+
+  // Receivers requiring non-standard accesses (interceptors, access
+  // checks, strings and string wrappers, proxies) are handled in the runtime.
+  GotoIf(Int32LessThanOrEqual(instance_type,
+                              Int32Constant(LAST_SPECIAL_RECEIVER_TYPE)),
+         slow);
+
+  // Check if the receiver has fast or slow properties.
+  Node* properties = LoadProperties(receiver);
+  Node* properties_map = LoadMap(properties);
+  GotoIf(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+         &if_property_dictionary);
+
+  // Try looking up the property on the receiver; if unsuccessful, look
+  // for a handler in the stub cache.
+  Node* bitfield3 = LoadMapBitField3(receiver_map);
+  Node* descriptors = LoadMapDescriptors(receiver_map);
+
+  Label if_descriptor_found(this), stub_cache(this);
+  Variable var_name_index(this, MachineType::PointerRepresentation());
+  DescriptorLookup(key, descriptors, bitfield3, &if_descriptor_found,
+                   &var_name_index, &stub_cache);
+
+  Bind(&if_descriptor_found);
+  {
+    LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+                               var_name_index.value(), &var_details,
+                               &var_value);
+    Goto(&if_found_on_receiver);
+  }
+
+  Bind(&stub_cache);
+  {
+    Comment("stub cache probe for fast property load");
+    Variable var_handler(this, MachineRepresentation::kTagged);
+    Label found_handler(this, &var_handler), stub_cache_miss(this);
+    TryProbeStubCache(isolate()->load_stub_cache(), receiver, key,
+                      &found_handler, &var_handler, &stub_cache_miss);
+    Bind(&found_handler);
+    { HandleLoadICHandlerCase(p, var_handler.value(), slow); }
+
+    Bind(&stub_cache_miss);
+    {
+      // TODO(jkummerow): Check if the property exists on the prototype
+      // chain. If it doesn't, then there's no point in missing.
+      Comment("KeyedLoadGeneric_miss");
+      TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+                      p->name, p->slot, p->vector);
+    }
+  }
+
+  Bind(&if_property_dictionary);
+  {
+    Comment("dictionary property load");
+    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+    // seeing global objects here (which would need special handling).
+
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label dictionary_found(this, &var_name_index);
+    NameDictionaryLookup<NameDictionary>(properties, key, &dictionary_found,
+                                         &var_name_index,
+                                         &lookup_prototype_chain);
+    Bind(&dictionary_found);
+    {
+      LoadPropertyFromNameDictionary(properties, var_name_index.value(),
+                                     &var_details, &var_value);
+      Goto(&if_found_on_receiver);
+    }
+  }
+
+  Bind(&if_found_on_receiver);
+  {
+    // Accessor properties invoke their getter; data properties return as-is.
+    Node* value = CallGetterIfAccessor(var_value.value(), var_details.value(),
+                                       p->context, receiver, slow);
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_symbol(), 1);
+    Return(value);
+  }
+
+  Bind(&lookup_prototype_chain);
+  {
+    Variable var_holder_map(this, MachineRepresentation::kTagged);
+    Variable var_holder_instance_type(this, MachineRepresentation::kWord32);
+    Label return_undefined(this);
+    Variable* merged_variables[] = {&var_holder_map, &var_holder_instance_type};
+    Label loop(this, arraysize(merged_variables), merged_variables);
+
+    var_holder_map.Bind(receiver_map);
+    var_holder_instance_type.Bind(instance_type);
+    // Private symbols must not be looked up on the prototype chain.
+    GotoIf(IsPrivateSymbol(key), &return_undefined);
+    Goto(&loop);
+    Bind(&loop);
+    {
+      // Bailout if it can be an integer indexed exotic case.
+      GotoIf(Word32Equal(var_holder_instance_type.value(),
+                         Int32Constant(JS_TYPED_ARRAY_TYPE)),
+             slow);
+      Node* proto = LoadMapPrototype(var_holder_map.value());
+      GotoIf(WordEqual(proto, NullConstant()), &return_undefined);
+      Node* proto_map = LoadMap(proto);
+      Node* proto_instance_type = LoadMapInstanceType(proto_map);
+      var_holder_map.Bind(proto_map);
+      var_holder_instance_type.Bind(proto_instance_type);
+      Label next_proto(this), return_value(this, &var_value), goto_slow(this);
+      // Note: |receiver| (not |proto|) is passed as the getter receiver.
+      TryGetOwnProperty(p->context, receiver, proto, proto_map,
+                        proto_instance_type, key, &return_value, &var_value,
+                        &next_proto, &goto_slow);
+
+      // This trampoline and the next are required to appease Turbofan's
+      // variable merging.
+      Bind(&next_proto);
+      Goto(&loop);
+
+      Bind(&goto_slow);
+      Goto(slow);
+
+      Bind(&return_value);
+      Return(var_value.value());
+    }
+
+    Bind(&return_undefined);
+    Return(UndefinedConstant());
+  }
+}
+
+//////////////////// Stub cache access helpers.
+
+// Out-of-line definition of the assembler-side mirror of StubCache::Table,
+// kept numerically identical so the two can be cast back and forth.
+enum AccessorAssembler::StubCacheTable : int {
+  kPrimary = static_cast<int>(StubCache::kPrimary),
+  kSecondary = static_cast<int>(StubCache::kSecondary)
+};
+
+// CSA counterpart of StubCache::PrimaryOffset(): combines the name's hash
+// field with the (truncated) map word, XORs in kPrimaryMagic, and masks the
+// result down to a primary-table entry offset.
+Node* AccessorAssembler::StubCachePrimaryOffset(Node* name, Node* map) {
+  // See v8::internal::StubCache::PrimaryOffset().
+  STATIC_ASSERT(StubCache::kCacheIndexShift == Name::kHashShift);
+  // Compute the hash of the name (use entire hash field).
+  Node* hash_field = LoadNameHashField(name);
+  // The hash must already have been computed for the name.
+  CSA_ASSERT(this,
+             Word32Equal(Word32And(hash_field,
+                                   Int32Constant(Name::kHashNotComputedMask)),
+                         Int32Constant(0)));
+
+  // Using only the low bits in 64-bit mode is unlikely to increase the
+  // risk of collision even if the heap is spread over an area larger than
+  // 4Gb (and not at all if it isn't).
+  Node* map32 = TruncateWordToWord32(BitcastTaggedToWord(map));
+  Node* hash = Int32Add(hash_field, map32);
+  // Base the offset on a simple combination of name and map.
+  hash = Word32Xor(hash, Int32Constant(StubCache::kPrimaryMagic));
+  uint32_t mask = (StubCache::kPrimaryTableSize - 1)
+                  << StubCache::kCacheIndexShift;
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(mask)));
+}
+
+// CSA counterpart of StubCache::SecondaryOffset():
+//   offset = ((seed - name) + kSecondaryMagic) & secondary_table_mask
+// where |seed| is the primary-table offset computed for the same probe.
+Node* AccessorAssembler::StubCacheSecondaryOffset(Node* name, Node* seed) {
+  const int32_t kMask = (StubCache::kSecondaryTableSize - 1)
+                        << StubCache::kCacheIndexShift;
+  Node* name32 = TruncateWordToWord32(BitcastTaggedToWord(name));
+  Node* seed32 = TruncateWordToWord32(seed);
+  Node* hash = Int32Add(Int32Sub(seed32, name32),
+                        Int32Constant(StubCache::kSecondaryMagic));
+  return ChangeUint32ToWord(Word32And(hash, Int32Constant(kMask)));
+}
+
+// Probes one table (primary or secondary) of |stub_cache| at |entry_offset|.
+// On a (name, map) match, binds the cached handler into |var_handler| and
+// jumps to |if_handler|; otherwise jumps to |if_miss|. In debug builds the
+// FLAG_test_{primary,secondary}_stub_cache flags can force one table to
+// always miss.
+void AccessorAssembler::TryProbeStubCacheTable(StubCache* stub_cache,
+                                               StubCacheTable table_id,
+                                               Node* entry_offset, Node* name,
+                                               Node* map, Label* if_handler,
+                                               Variable* var_handler,
+                                               Label* if_miss) {
+  StubCache::Table table = static_cast<StubCache::Table>(table_id);
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    Goto(if_miss);
+    return;
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    Goto(if_miss);
+    return;
+  }
+#endif
+  // The {table_offset} holds the entry offset times four (due to masking
+  // and shifting optimizations).
+  const int kMultiplier = sizeof(StubCache::Entry) >> Name::kHashShift;
+  entry_offset = IntPtrMul(entry_offset, IntPtrConstant(kMultiplier));
+
+  // Check that the key in the entry matches the name.
+  Node* key_base =
+      ExternalConstant(ExternalReference(stub_cache->key_reference(table)));
+  Node* entry_key = Load(MachineType::Pointer(), key_base, entry_offset);
+  GotoIf(WordNotEqual(name, entry_key), if_miss);
+
+  // Get the map entry from the cache.
+  // The DCHECKs pin down the Entry layout (key, value, map) that the fixed
+  // pointer-size offsets below rely on.
+  DCHECK_EQ(kPointerSize * 2, stub_cache->map_reference(table).address() -
+                                  stub_cache->key_reference(table).address());
+  Node* entry_map =
+      Load(MachineType::Pointer(), key_base,
+           IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize * 2)));
+  GotoIf(WordNotEqual(map, entry_map), if_miss);
+
+  DCHECK_EQ(kPointerSize, stub_cache->value_reference(table).address() -
+                              stub_cache->key_reference(table).address());
+  Node* handler = Load(MachineType::TaggedPointer(), key_base,
+                       IntPtrAdd(entry_offset, IntPtrConstant(kPointerSize)));
+
+  // We found the handler.
+  var_handler->Bind(handler);
+  Goto(if_handler);
+}
+
+// Probes |stub_cache| for a handler keyed on (|name|, map of |receiver|):
+// first the primary table, then the secondary table seeded with the primary
+// offset. Smi receivers miss immediately. Hit/miss counters are updated.
+void AccessorAssembler::TryProbeStubCache(StubCache* stub_cache, Node* receiver,
+                                          Node* name, Label* if_handler,
+                                          Variable* var_handler,
+                                          Label* if_miss) {
+  Label try_secondary(this), miss(this);
+
+  Counters* counters = isolate()->counters();
+  IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the {receiver} isn't a smi.
+  GotoIf(TaggedIsSmi(receiver), &miss);
+
+  Node* receiver_map = LoadMap(receiver);
+
+  // Probe the primary table.
+  Node* primary_offset = StubCachePrimaryOffset(name, receiver_map);
+  TryProbeStubCacheTable(stub_cache, kPrimary, primary_offset, name,
+                         receiver_map, if_handler, var_handler, &try_secondary);
+
+  Bind(&try_secondary);
+  {
+    // Probe the secondary table.
+    Node* secondary_offset = StubCacheSecondaryOffset(name, primary_offset);
+    TryProbeStubCacheTable(stub_cache, kSecondary, secondary_offset, name,
+                           receiver_map, if_handler, var_handler, &miss);
+  }
+
+  Bind(&miss);
+  {
+    IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+    Goto(if_miss);
+  }
+}
+
+//////////////////// Entry points into private implementation (one per stub).
+
+// LoadIC dispatcher: checks the feedback slot for the monomorphic,
+// polymorphic and megamorphic (stub cache) states in that order, invoking
+// the matching handler; any other state (including deprecated receiver
+// maps) tail-calls the LoadIC_Miss runtime function.
+void AccessorAssembler::LoadIC(const LoadICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  // Deprecated maps need migration, which only the runtime can do.
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  { HandleLoadICHandlerCase(p, var_handler.value(), &miss); }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("LoadIC_try_polymorphic");
+    // Polymorphic feedback is a FixedArray of (map, handler) pairs.
+    GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+              &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &miss);
+
+    TryProbeStubCache(isolate()->load_stub_cache(), p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+// Executes a proto-array load handler: validates the prototype chain encoded
+// in the FixedArray |handler|, then applies its smi handler to the resulting
+// holder. Validation failure tail-calls LoadIC_Miss.
+// |throw_reference_error_if_nonexistent| controls behavior for absent
+// properties (used for typeof-sensitive global loads).
+void AccessorAssembler::LoadICProtoArray(
+    const LoadICParameters* p, Node* handler,
+    bool throw_reference_error_if_nonexistent) {
+  Label miss(this);
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+  CSA_ASSERT(this, IsFixedArrayMap(LoadMap(handler)));
+
+  ExitPoint direct_exit(this);
+
+  Node* smi_handler = LoadObjectField(handler, LoadHandler::kSmiHandlerOffset);
+  Node* handler_flags = SmiUntag(smi_handler);
+
+  Node* handler_length = LoadAndUntagFixedArrayBaseLength(handler);
+
+  Node* holder =
+      EmitLoadICProtoArrayCheck(p, handler, handler_length, handler_flags,
+                                &miss, throw_reference_error_if_nonexistent);
+
+  HandleLoadICSmiHandlerCase(p, holder, smi_handler, &miss, &direct_exit,
+                             kOnlyProperties);
+
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kLoadIC_Miss, p->context, p->receiver, p->name,
+                    p->slot, p->vector);
+  }
+}
+
+// Fast path of LoadGlobalIC: the feedback slot holds a WeakCell pointing at
+// the global's PropertyCell. Returns the cell's value via |exit_point|;
+// jumps to |try_handler| when the weak cell has been cleared, and to |miss|
+// when the cell holds the hole (property was deleted / not yet initialized).
+void AccessorAssembler::LoadGlobalIC_TryPropertyCellCase(
+    Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+    Label* miss, ParameterMode slot_mode) {
+  Comment("LoadGlobalIC_TryPropertyCellCase");
+
+  Node* weak_cell = LoadFixedArrayElement(vector, slot, 0, slot_mode);
+  CSA_ASSERT(this, HasInstanceType(weak_cell, WEAK_CELL_TYPE));
+
+  // Load value or try handler case if the {weak_cell} is cleared.
+  Node* property_cell = LoadWeakCellValue(weak_cell, try_handler);
+  CSA_ASSERT(this, HasInstanceType(property_cell, PROPERTY_CELL_TYPE));
+
+  Node* value = LoadObjectField(property_cell, PropertyCell::kValueOffset);
+  GotoIf(WordEqual(value, TheHoleConstant()), miss);
+  exit_point->Return(value);
+}
+
+// Handler path of LoadGlobalIC: reads the handler stored next to the weak
+// cell in the feedback vector (slot + 1, hence the kPointerSize offset). An
+// uninitialized sentinel goes to |miss|; a Code handler is invoked as a stub
+// with the native context's global extension object as receiver; anything
+// else is dispatched through HandleLoadGlobalICHandlerCase. Inside typeof,
+// nonexistent globals must not throw a ReferenceError.
+void AccessorAssembler::LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+                                                    TypeofMode typeof_mode,
+                                                    ExitPoint* exit_point,
+                                                    Label* miss) {
+  Comment("LoadGlobalIC_TryHandlerCase");
+
+  Label call_handler(this);
+
+  Node* handler =
+      LoadFixedArrayElement(p->vector, p->slot, kPointerSize, SMI_PARAMETERS);
+  CSA_ASSERT(this, Word32BinaryNot(TaggedIsSmi(handler)));
+  GotoIf(WordEqual(handler, LoadRoot(Heap::kuninitialized_symbolRootIndex)),
+         miss);
+  GotoIf(IsCodeMap(LoadMap(handler)), &call_handler);
+
+  bool throw_reference_error_if_nonexistent = typeof_mode == NOT_INSIDE_TYPEOF;
+  HandleLoadGlobalICHandlerCase(p, handler, miss, exit_point,
+                                throw_reference_error_if_nonexistent);
+
+  Bind(&call_handler);
+  {
+    LoadWithVectorDescriptor descriptor(isolate());
+    Node* native_context = LoadNativeContext(p->context);
+    Node* receiver =
+        LoadContextElement(native_context, Context::EXTENSION_INDEX);
+    exit_point->ReturnCallStub(descriptor, handler, p->context, receiver,
+                               p->name, p->slot, p->vector);
+  }
+}
+
+// Miss path of LoadGlobalIC: hands the load off to the LoadGlobalIC_Miss
+// runtime function via |exit_point|.
+void AccessorAssembler::LoadGlobalIC_MissCase(const LoadICParameters* p,
+                                              ExitPoint* exit_point) {
+  Comment("LoadGlobalIC_MissCase");
+
+  exit_point->ReturnCallRuntime(Runtime::kLoadGlobalIC_Miss, p->context,
+                                p->name, p->slot, p->vector);
+}
+
+// LoadGlobalIC dispatcher: tries the property-cell fast path, then the
+// handler path, and finally the runtime miss path. Each stage falls through
+// to the next via the labels threaded between the helpers.
+void AccessorAssembler::LoadGlobalIC(const LoadICParameters* p,
+                                     TypeofMode typeof_mode) {
+  ExitPoint direct_exit(this);
+
+  Label try_handler(this), miss(this);
+  LoadGlobalIC_TryPropertyCellCase(p->vector, p->slot, &direct_exit,
+                                   &try_handler, &miss);
+
+  Bind(&try_handler);
+  LoadGlobalIC_TryHandlerCase(p, typeof_mode, &direct_exit, &miss);
+
+  Bind(&miss);
+  LoadGlobalIC_MissCase(p, &direct_exit);
+}
+
+void AccessorAssembler::KeyedLoadIC(const LoadICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      try_polymorphic_name(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  { HandleLoadICHandlerCase(p, var_handler.value(), &miss, kSupportElements); }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("KeyedLoadIC_try_polymorphic");
+    GotoIfNot(WordEqual(LoadMap(feedback), FixedArrayMapConstant()),
+              &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    Comment("KeyedLoadIC_try_megamorphic");
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &try_polymorphic_name);
+    // TODO(jkummerow): Inline this? Or some of it?
+    TailCallStub(CodeFactory::KeyedLoadIC_Megamorphic(isolate()), p->context,
+                 p->receiver, p->name, p->slot, p->vector);
+  }
+  Bind(&try_polymorphic_name);
+  {
+    // We might have a name in feedback, and a fixed array in the next slot.
+    Comment("KeyedLoadIC_try_polymorphic_name");
+    GotoIfNot(WordEqual(feedback, p->name), &miss);
+    // If the name comparison succeeded, we know we have a fixed array with
+    // at least one map/handler pair.
+    Node* offset = ElementOffsetFromIndex(
+        p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+        FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+    Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+    HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler, &miss,
+                          1);
+  }
+  Bind(&miss);
+  {
+    Comment("KeyedLoadIC_miss");
+    TailCallRuntime(Runtime::kKeyedLoadIC_Miss, p->context, p->receiver,
+                    p->name, p->slot, p->vector);
+  }
+}
+
+void AccessorAssembler::KeyedLoadICGeneric(const LoadICParameters* p) {
+  Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_unique(this, MachineRepresentation::kTagged);
+  var_unique.Bind(p->name);  // Dummy initialization.
+  Label if_index(this), if_unique_name(this), slow(this);
+
+  Node* receiver = p->receiver;
+  GotoIf(TaggedIsSmi(receiver), &slow);
+  Node* receiver_map = LoadMap(receiver);
+  Node* instance_type = LoadMapInstanceType(receiver_map);
+
+  TryToName(p->name, &if_index, &var_index, &if_unique_name, &var_unique,
+            &slow);
+
+  Bind(&if_index);
+  {
+    GenericElementLoad(receiver, receiver_map, instance_type, var_index.value(),
+                       &slow);
+  }
+
+  Bind(&if_unique_name);
+  {
+    GenericPropertyLoad(receiver, receiver_map, instance_type,
+                        var_unique.value(), p, &slow);
+  }
+
+  Bind(&slow);
+  {
+    Comment("KeyedLoadGeneric_slow");
+    IncrementCounter(isolate()->counters()->ic_keyed_load_generic_slow(), 1);
+    // TODO(jkummerow): Should we use the GetProperty TF stub instead?
+    TailCallRuntime(Runtime::kKeyedGetProperty, p->context, p->receiver,
+                    p->name);
+  }
+}
+
+void AccessorAssembler::StoreIC(const StoreICParameters* p) {
+  Variable var_handler(this, MachineRepresentation::kTagged);
+  // TODO(ishell): defer blocks when it works.
+  Label if_handler(this, &var_handler), try_polymorphic(this),
+      try_megamorphic(this /*, Label::kDeferred*/),
+      miss(this /*, Label::kDeferred*/);
+
+  Node* receiver_map = LoadReceiverMap(p->receiver);
+  GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+  // Check monomorphic case.
+  Node* feedback =
+      TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                         &var_handler, &try_polymorphic);
+  Bind(&if_handler);
+  {
+    Comment("StoreIC_if_handler");
+    HandleStoreICHandlerCase(p, var_handler.value(), &miss);
+  }
+
+  Bind(&try_polymorphic);
+  {
+    // Check polymorphic case.
+    Comment("StoreIC_try_polymorphic");
+    GotoIfNot(
+        WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+        &try_megamorphic);
+    HandlePolymorphicCase(receiver_map, feedback, &if_handler, &var_handler,
+                          &miss, 2);
+  }
+
+  Bind(&try_megamorphic);
+  {
+    // Check megamorphic case.
+    GotoIfNot(WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+              &miss);
+
+    TryProbeStubCache(isolate()->store_stub_cache(), p->receiver, p->name,
+                      &if_handler, &var_handler, &miss);
+  }
+  Bind(&miss);
+  {
+    TailCallRuntime(Runtime::kStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
+
+void AccessorAssembler::KeyedStoreIC(const StoreICParameters* p,
+                                     LanguageMode language_mode) {
+  // TODO(ishell): defer blocks when it works.
+  Label miss(this /*, Label::kDeferred*/);
+  {
+    Variable var_handler(this, MachineRepresentation::kTagged);
+
+    // TODO(ishell): defer blocks when it works.
+    Label if_handler(this, &var_handler), try_polymorphic(this),
+        try_megamorphic(this /*, Label::kDeferred*/),
+        try_polymorphic_name(this /*, Label::kDeferred*/);
+
+    Node* receiver_map = LoadReceiverMap(p->receiver);
+    GotoIf(IsSetWord32<Map::Deprecated>(LoadMapBitField3(receiver_map)), &miss);
+
+    // Check monomorphic case.
+    Node* feedback =
+        TryMonomorphicCase(p->slot, p->vector, receiver_map, &if_handler,
+                           &var_handler, &try_polymorphic);
+    Bind(&if_handler);
+    {
+      Comment("KeyedStoreIC_if_handler");
+      HandleStoreICHandlerCase(p, var_handler.value(), &miss, kSupportElements);
+    }
+
+    Bind(&try_polymorphic);
+    {
+      // Check polymorphic case.
+      Comment("KeyedStoreIC_try_polymorphic");
+      GotoIfNot(
+          WordEqual(LoadMap(feedback), LoadRoot(Heap::kFixedArrayMapRootIndex)),
+          &try_megamorphic);
+      Label if_transition_handler(this);
+      Variable var_transition_map_cell(this, MachineRepresentation::kTagged);
+      HandleKeyedStorePolymorphicCase(receiver_map, feedback, &if_handler,
+                                      &var_handler, &if_transition_handler,
+                                      &var_transition_map_cell, &miss);
+      Bind(&if_transition_handler);
+      Comment("KeyedStoreIC_polymorphic_transition");
+      {
+        Node* handler = var_handler.value();
+
+        Label call_handler(this);
+        Variable var_code_handler(this, MachineRepresentation::kTagged);
+        var_code_handler.Bind(handler);
+        GotoIfNot(IsTuple2Map(LoadMap(handler)), &call_handler);
+        {
+          CSA_ASSERT(this, IsTuple2Map(LoadMap(handler)));
+
+          // Check validity cell.
+          Node* validity_cell = LoadObjectField(handler, Tuple2::kValue1Offset);
+          Node* cell_value = LoadObjectField(validity_cell, Cell::kValueOffset);
+          GotoIf(
+              WordNotEqual(cell_value, SmiConstant(Map::kPrototypeChainValid)),
+              &miss);
+
+          var_code_handler.Bind(
+              LoadObjectField(handler, Tuple2::kValue2Offset));
+          Goto(&call_handler);
+        }
+
+        Bind(&call_handler);
+        {
+          Node* code_handler = var_code_handler.value();
+          CSA_ASSERT(this, IsCodeMap(LoadMap(code_handler)));
+
+          Node* transition_map =
+              LoadWeakCellValue(var_transition_map_cell.value(), &miss);
+          StoreTransitionDescriptor descriptor(isolate());
+          TailCallStub(descriptor, code_handler, p->context, p->receiver,
+                       p->name, transition_map, p->value, p->slot, p->vector);
+        }
+      }
+    }
+
+    Bind(&try_megamorphic);
+    {
+      // Check megamorphic case.
+      Comment("KeyedStoreIC_try_megamorphic");
+      GotoIfNot(
+          WordEqual(feedback, LoadRoot(Heap::kmegamorphic_symbolRootIndex)),
+          &try_polymorphic_name);
+      TailCallStub(
+          CodeFactory::KeyedStoreIC_Megamorphic(isolate(), language_mode),
+          p->context, p->receiver, p->name, p->value, p->slot, p->vector);
+    }
+
+    Bind(&try_polymorphic_name);
+    {
+      // We might have a name in feedback, and a fixed array in the next slot.
+      Comment("KeyedStoreIC_try_polymorphic_name");
+      GotoIfNot(WordEqual(feedback, p->name), &miss);
+      // If the name comparison succeeded, we know we have a FixedArray with
+      // at least one map/handler pair.
+      Node* offset = ElementOffsetFromIndex(
+          p->slot, FAST_HOLEY_ELEMENTS, SMI_PARAMETERS,
+          FixedArray::kHeaderSize + kPointerSize - kHeapObjectTag);
+      Node* array = Load(MachineType::AnyTagged(), p->vector, offset);
+      HandlePolymorphicCase(receiver_map, array, &if_handler, &var_handler,
+                            &miss, 1);
+    }
+  }
+  Bind(&miss);
+  {
+    Comment("KeyedStoreIC_miss");
+    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
+                    p->vector, p->receiver, p->name);
+  }
+}
+
+//////////////////// Public methods.
+
+void AccessorAssembler::GenerateLoadIC() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadIC(&p);
+}
+
+void AccessorAssembler::GenerateLoadICTrampoline() {
+  typedef LoadDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadIC(&p);
+}
+
+void AccessorAssembler::GenerateLoadICProtoArray(
+    bool throw_reference_error_if_nonexistent) {
+  typedef LoadICProtoArrayDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* handler = Parameter(Descriptor::kHandler);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  LoadICProtoArray(&p, handler, throw_reference_error_if_nonexistent);
+}
+
+void AccessorAssembler::GenerateLoadField() {
+  typedef LoadFieldDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = nullptr;
+  Node* slot = nullptr;
+  Node* vector = nullptr;
+  Node* context = Parameter(Descriptor::kContext);
+  LoadICParameters p(context, receiver, name, slot, vector);
+
+  ExitPoint direct_exit(this);
+
+  HandleLoadICSmiHandlerCase(&p, receiver, Parameter(Descriptor::kSmiHandler),
+                             nullptr, &direct_exit, kOnlyProperties);
+}
+
+void AccessorAssembler::GenerateLoadGlobalIC(TypeofMode typeof_mode) {
+  typedef LoadGlobalWithVectorDescriptor Descriptor;
+
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, nullptr, name, slot, vector);
+  LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssembler::GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode) {
+  typedef LoadGlobalDescriptor Descriptor;
+
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, nullptr, name, slot, vector);
+  LoadGlobalIC(&p, typeof_mode);
+}
+
+void AccessorAssembler::GenerateKeyedLoadIC() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedLoadICTrampoline() {
+  typedef LoadDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedLoadIC_Megamorphic() {
+  typedef LoadWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  LoadICParameters p(context, receiver, name, slot, vector);
+  KeyedLoadICGeneric(&p);
+}
+
+void AccessorAssembler::GenerateStoreIC() {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  StoreIC(&p);
+}
+
+void AccessorAssembler::GenerateStoreICTrampoline() {
+  typedef StoreDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  StoreIC(&p);
+}
+
+void AccessorAssembler::GenerateKeyedStoreIC(LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  KeyedStoreIC(&p, language_mode);
+}
+
+void AccessorAssembler::GenerateKeyedStoreICTrampoline(
+    LanguageMode language_mode) {
+  typedef StoreDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* context = Parameter(Descriptor::kContext);
+  Node* vector = LoadFeedbackVectorForStub();
+
+  StoreICParameters p(context, receiver, name, value, slot, vector);
+  KeyedStoreIC(&p, language_mode);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ic/accessor-assembler.h b/src/ic/accessor-assembler.h
new file mode 100644
index 0000000..9bc2873
--- /dev/null
+++ b/src/ic/accessor-assembler.h
@@ -0,0 +1,284 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+#define V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
+
+#include "src/code-stub-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class CodeAssemblerState;
+}
+
+class ExitPoint;
+
+class AccessorAssembler : public CodeStubAssembler {
+ public:
+  typedef compiler::Node Node;
+
+  explicit AccessorAssembler(compiler::CodeAssemblerState* state)
+      : CodeStubAssembler(state) {}
+
+  void GenerateLoadIC();
+  void GenerateLoadField();
+  void GenerateLoadICTrampoline();
+  void GenerateKeyedLoadIC();
+  void GenerateKeyedLoadICTrampoline();
+  void GenerateKeyedLoadIC_Megamorphic();
+  void GenerateStoreIC();
+  void GenerateStoreICTrampoline();
+
+  void GenerateLoadICProtoArray(bool throw_reference_error_if_nonexistent);
+
+  void GenerateLoadGlobalIC(TypeofMode typeof_mode);
+  void GenerateLoadGlobalICTrampoline(TypeofMode typeof_mode);
+
+  void GenerateKeyedStoreIC(LanguageMode language_mode);
+  void GenerateKeyedStoreICTrampoline(LanguageMode language_mode);
+
+  void TryProbeStubCache(StubCache* stub_cache, Node* receiver, Node* name,
+                         Label* if_handler, Variable* var_handler,
+                         Label* if_miss);
+
+  Node* StubCachePrimaryOffsetForTesting(Node* name, Node* map) {
+    return StubCachePrimaryOffset(name, map);
+  }
+  Node* StubCacheSecondaryOffsetForTesting(Node* name, Node* map) {
+    return StubCacheSecondaryOffset(name, map);
+  }
+
+  struct LoadICParameters {
+    LoadICParameters(Node* context, Node* receiver, Node* name, Node* slot,
+                     Node* vector)
+        : context(context),
+          receiver(receiver),
+          name(name),
+          slot(slot),
+          vector(vector) {}
+
+    Node* context;
+    Node* receiver;
+    Node* name;
+    Node* slot;
+    Node* vector;
+  };
+
+  void LoadGlobalIC_TryPropertyCellCase(
+      Node* vector, Node* slot, ExitPoint* exit_point, Label* try_handler,
+      Label* miss, ParameterMode slot_mode = SMI_PARAMETERS);
+  void LoadGlobalIC_TryHandlerCase(const LoadICParameters* p,
+                                   TypeofMode typeof_mode,
+                                   ExitPoint* exit_point, Label* miss);
+  void LoadGlobalIC_MissCase(const LoadICParameters* p, ExitPoint* exit_point);
+
+ protected:
+  struct StoreICParameters : public LoadICParameters {
+    StoreICParameters(Node* context, Node* receiver, Node* name, Node* value,
+                      Node* slot, Node* vector)
+        : LoadICParameters(context, receiver, name, slot, vector),
+          value(value) {}
+    Node* value;
+  };
+
+  enum ElementSupport { kOnlyProperties, kSupportElements };
+  void HandleStoreICHandlerCase(
+      const StoreICParameters* p, Node* handler, Label* miss,
+      ElementSupport support_elements = kOnlyProperties);
+
+ private:
+  // Stub generation entry points.
+
+  void LoadIC(const LoadICParameters* p);
+  void LoadICProtoArray(const LoadICParameters* p, Node* handler,
+                        bool throw_reference_error_if_nonexistent);
+  void LoadGlobalIC(const LoadICParameters* p, TypeofMode typeof_mode);
+  void KeyedLoadIC(const LoadICParameters* p);
+  void KeyedLoadICGeneric(const LoadICParameters* p);
+  void StoreIC(const StoreICParameters* p);
+  void KeyedStoreIC(const StoreICParameters* p, LanguageMode language_mode);
+
+  // IC dispatcher behavior.
+
+  // Checks monomorphic case. Returns {feedback} entry of the vector.
+  Node* TryMonomorphicCase(Node* slot, Node* vector, Node* receiver_map,
+                           Label* if_handler, Variable* var_handler,
+                           Label* if_miss);
+  void HandlePolymorphicCase(Node* receiver_map, Node* feedback,
+                             Label* if_handler, Variable* var_handler,
+                             Label* if_miss, int unroll_count);
+  void HandleKeyedStorePolymorphicCase(Node* receiver_map, Node* feedback,
+                                       Label* if_handler, Variable* var_handler,
+                                       Label* if_transition_handler,
+                                       Variable* var_transition_map_cell,
+                                       Label* if_miss);
+
+  // LoadIC implementation.
+
+  void HandleLoadICHandlerCase(
+      const LoadICParameters* p, Node* handler, Label* miss,
+      ElementSupport support_elements = kOnlyProperties);
+
+  void HandleLoadICSmiHandlerCase(const LoadICParameters* p, Node* holder,
+                                  Node* smi_handler, Label* miss,
+                                  ExitPoint* exit_point,
+                                  ElementSupport support_elements);
+
+  void HandleLoadICProtoHandlerCase(const LoadICParameters* p, Node* handler,
+                                    Variable* var_holder,
+                                    Variable* var_smi_handler,
+                                    Label* if_smi_handler, Label* miss,
+                                    ExitPoint* exit_point,
+                                    bool throw_reference_error_if_nonexistent);
+
+  Node* EmitLoadICProtoArrayCheck(const LoadICParameters* p, Node* handler,
+                                  Node* handler_length, Node* handler_flags,
+                                  Label* miss,
+                                  bool throw_reference_error_if_nonexistent);
+
+  // LoadGlobalIC implementation.
+
+  void HandleLoadGlobalICHandlerCase(const LoadICParameters* p, Node* handler,
+                                     Label* miss, ExitPoint* exit_point,
+                                     bool throw_reference_error_if_nonexistent);
+
+  // StoreIC implementation.
+
+  void HandleStoreICElementHandlerCase(const StoreICParameters* p,
+                                       Node* handler, Label* miss);
+
+  void HandleStoreICProtoHandler(const StoreICParameters* p, Node* handler,
+                                 Label* miss);
+  // If |transition| is nullptr then a normal field store is generated;
+  // otherwise a transitioning store is generated.
+  void HandleStoreICSmiHandlerCase(Node* handler_word, Node* holder,
+                                   Node* value, Node* transition, Label* miss);
+  // If |transition| is nullptr then a normal field store is generated;
+  // otherwise a transitioning store is generated.
+  void HandleStoreFieldAndReturn(Node* handler_word, Node* holder,
+                                 Representation representation, Node* value,
+                                 Node* transition, Label* miss);
+
+  // KeyedLoadIC_Generic implementation.
+
+  void GenericElementLoad(Node* receiver, Node* receiver_map,
+                          Node* instance_type, Node* index, Label* slow);
+
+  void GenericPropertyLoad(Node* receiver, Node* receiver_map,
+                           Node* instance_type, Node* key,
+                           const LoadICParameters* p, Label* slow);
+
+  // Low-level helpers.
+
+  Node* PrepareValueForStore(Node* handler_word, Node* holder,
+                             Representation representation, Node* transition,
+                             Node* value, Label* bailout);
+
+  // Extends properties backing store by JSObject::kFieldsAdded elements.
+  void ExtendPropertiesBackingStore(Node* object);
+
+  void StoreNamedField(Node* handler_word, Node* object, bool is_inobject,
+                       Representation representation, Node* value,
+                       bool transition_to_field, Label* bailout);
+
+  void EmitFastElementsBoundsCheck(Node* object, Node* elements,
+                                   Node* intptr_index,
+                                   Node* is_jsarray_condition, Label* miss);
+  void EmitElementLoad(Node* object, Node* elements, Node* elements_kind,
+                       Node* key, Node* is_jsarray_condition, Label* if_hole,
+                       Label* rebox_double, Variable* var_double_value,
+                       Label* unimplemented_elements_kind, Label* out_of_bounds,
+                       Label* miss, ExitPoint* exit_point);
+  void CheckPrototype(Node* prototype_cell, Node* name, Label* miss);
+  void NameDictionaryNegativeLookup(Node* object, Node* name, Label* miss);
+
+  // Stub cache access helpers.
+
+  // This enum is used here as a replacement for StubCache::Table to avoid
+  // including stub cache header.
+  enum StubCacheTable : int;
+
+  Node* StubCachePrimaryOffset(Node* name, Node* map);
+  Node* StubCacheSecondaryOffset(Node* name, Node* seed);
+
+  void TryProbeStubCacheTable(StubCache* stub_cache, StubCacheTable table_id,
+                              Node* entry_offset, Node* name, Node* map,
+                              Label* if_handler, Variable* var_handler,
+                              Label* if_miss);
+};
+
+// Abstraction over direct and indirect exit points. Direct exits correspond to
+// tailcalls and Return, while indirect exits store the result in a variable
+// and then jump to an exit label.
+class ExitPoint {
+ private:
+  typedef compiler::Node Node;
+  typedef compiler::CodeAssemblerLabel CodeAssemblerLabel;
+  typedef compiler::CodeAssemblerVariable CodeAssemblerVariable;
+
+ public:
+  explicit ExitPoint(CodeStubAssembler* assembler)
+      : ExitPoint(assembler, nullptr, nullptr) {}
+  ExitPoint(CodeStubAssembler* assembler, CodeAssemblerLabel* out,
+            CodeAssemblerVariable* var_result)
+      : out_(out), var_result_(var_result), asm_(assembler) {
+    DCHECK_EQ(out != nullptr, var_result != nullptr);
+  }
+
+  template <class... TArgs>
+  void ReturnCallRuntime(Runtime::FunctionId function, Node* context,
+                         TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallRuntime(function, context, args...);
+    } else {
+      IndirectReturn(asm_->CallRuntime(function, context, args...));
+    }
+  }
+
+  template <class... TArgs>
+  void ReturnCallStub(Callable const& callable, Node* context, TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallStub(callable, context, args...);
+    } else {
+      IndirectReturn(asm_->CallStub(callable, context, args...));
+    }
+  }
+
+  template <class... TArgs>
+  void ReturnCallStub(const CallInterfaceDescriptor& descriptor, Node* target,
+                      Node* context, TArgs... args) {
+    if (IsDirect()) {
+      asm_->TailCallStub(descriptor, target, context, args...);
+    } else {
+      IndirectReturn(asm_->CallStub(descriptor, target, context, args...));
+    }
+  }
+
+  void Return(Node* const result) {
+    if (IsDirect()) {
+      asm_->Return(result);
+    } else {
+      IndirectReturn(result);
+    }
+  }
+
+  bool IsDirect() const { return out_ == nullptr; }
+
+ private:
+  void IndirectReturn(Node* const result) {
+    var_result_->Bind(result);
+    asm_->Goto(out_);
+  }
+
+  CodeAssemblerLabel* const out_;
+  CodeAssemblerVariable* const var_result_;
+  CodeStubAssembler* const asm_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SRC_IC_ACCESSOR_ASSEMBLER_H_
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
index 6145d43..ebef63c 100644
--- a/src/ic/arm/handler-compiler-arm.cc
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -135,14 +135,6 @@
   __ add(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -189,27 +181,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ ldr(result,
-         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(r0, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -228,10 +199,12 @@
   __ b(ne, miss);
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -239,15 +212,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -355,58 +320,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ ldr(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ tst(scratch, Operand(Map::Deprecated::kMask));
-    __ b(ne, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ ldr(scratch,
-         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ b(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ b(ne, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -538,13 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r0, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -609,8 +515,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index babf497..b749027 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -6,530 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ b(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
-  __ b(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                     JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r0, r3, r4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Perform tail call to the entry.
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r4;
-  Register address = r5;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ b(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
-  __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
-  __ b(ne, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
-  __ str(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ b(ne, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ add(address, elements,
-         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
-                 kHeapObjectTag));
-  __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
-  __ cmp(scratch, Operand(kHoleNanUpper32));
-  __ b(ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch, key, Operand(Smi::FromInt(1)));
-    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r1));
-  DCHECK(key.is(r2));
-  DCHECK(value.is(r0));
-  Register receiver_map = r3;
-  Register elements_map = r6;
-  Register elements = r9;  // Elements array of the receiver.
-  // r4 and r5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ b(ne, &slow);
-  // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ cmp(r4, Operand(JS_ARRAY_TYPE));
-  __ b(eq, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ cmp(r4, Operand(JS_OBJECT_TYPE));
-  __ b(lo, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(lo, &fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r0: value.
-  // r1: key.
-  // r2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r4, &slow);
-
-  // We use register r8, because otherwise probing the megamorphic stub cache
-  // would require pushing temporaries on the stack.
-  // TODO(mvstanton): quit using register r8 when
-  // FLAG_enable_embedded_constant_pool is turned on.
-  DCHECK(!FLAG_enable_embedded_constant_pool);
-  Register temporary2 = r8;
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-
-  DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
-                                                     temporary2, r6, r9);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ b(ne, &slow);  // Only support writing to writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &slow);
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r5;
-  DCHECK(receiver.is(r1));
-  DCHECK(name.is(r2));
-  DCHECK(value.is(r0));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r3));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r4));
-
-  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +67,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   Address patch_address =
diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc
deleted file mode 100644
index 3185231..0000000
--- a/src/ic/arm/ic-compiler-arm.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc
deleted file mode 100644
index b0f93e3..0000000
--- a/src/ic/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ add(base_addr, offset_scratch, Operand(key_offset));
-
-  // Check that the key in the entry matches the name.
-  __ ldr(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ b(ne, &miss);
-
-  // Check the map matches.
-  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ b(ne, &miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, Operand(ip));
-  __ eor(scratch, scratch, Operand(kPrimaryMagic));
-  __ mov(ip, Operand(kPrimaryTableSize - 1));
-  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name));
-  __ add(scratch, scratch, Operand(kSecondaryMagic));
-  __ mov(ip, Operand(kSecondaryTableSize - 1));
-  __ and_(scratch, scratch, Operand(ip, LSL, kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
index 58d0bb7..b7dc589 100644
--- a/src/ic/arm64/handler-compiler-arm64.cc
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -44,14 +44,6 @@
   __ Drop(2);
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -91,31 +83,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ Ldr(result,
-         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  // TryGetFunctionPrototype can't put the result directly in x0 because the
-  // 3 inputs registers can't alias and we call this function from
-  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
-  // move the result in x0.
-  __ Mov(x0, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -132,25 +99,18 @@
   __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -386,57 +346,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ Mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ Ldrsw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ Ldr(scratch,
-         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Cmp(value_reg, scratch);
-  __ B(ne, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ Ldr(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ B(ne, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -572,13 +481,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(x0, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
@@ -644,8 +546,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 0ced207..8c7d4f2 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -6,489 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
-  DCHECK(!AreAliased(result, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal property.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ B(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ Ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  static const int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, kTypeAndReadOnlyMask);
-  __ B(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  static const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
-  __ Str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ Mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = x0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-  Label slow;
-
-  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                     JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), x0, x3, x4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ Bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-  ASM_LOCATION("LoadIC::GenerateMiss");
-
-  DCHECK(!AreAliased(x4, x5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, x4, x5);
-
-  // Perform tail call to the entry.
-  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
-          LoadWithVectorDescriptor::NameRegister(),
-          LoadWithVectorDescriptor::SlotRegister(),
-          LoadWithVectorDescriptor::VectorRegister());
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(x10, x11, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, x10, x11);
-
-  __ Push(LoadWithVectorDescriptor::ReceiverRegister(),
-          LoadWithVectorDescriptor::NameRegister(),
-          LoadWithVectorDescriptor::SlotRegister(),
-          LoadWithVectorDescriptor::VectorRegister());
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
-  StoreIC_PushArgs(masm);
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     x10, x11));
-
-  Label transition_smi_elements;
-  Label transition_double_elements;
-  Label fast_double_without_map_check;
-  Label non_double_value;
-  Label finish_store;
-
-  __ Bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ B(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because there
-  // may be a callback on the element.
-  Label holecheck_passed;
-  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-  __ bind(&holecheck_passed);
-
-  // Smi stores don't require further checks.
-  __ JumpIfSmi(value, &finish_store);
-
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
-  __ Bind(&finish_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-
-  Register address = x11;
-  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Str(value, MemOperand(address));
-
-  Label dont_record_write;
-  __ JumpIfSmi(value, &dont_record_write);
-
-  // Update write barrier for the elements array address.
-  __ Mov(x10, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-
-  __ Bind(&dont_record_write);
-  __ Ret();
-
-
-  __ Bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so go to
-  // the runtime.
-  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
-  __ Bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-
-  __ Bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&fast_double_without_map_check);
-
-  __ Bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, x10, x11, slow);
-
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-
-  __ Bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, x10, x11, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
-  Label slow;
-  Label array;
-  Label fast_object;
-  Label extra;
-  Label fast_object_grow;
-  Label fast_double_grow;
-  Label fast_double;
-  Label maybe_name_key;
-  Label miss;
-
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(x1));
-  DCHECK(key.is(x2));
-  DCHECK(value.is(x0));
-
-  Register receiver_map = x3;
-  Register elements = x4;
-  Register elements_map = x5;
-
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ JumpIfSmi(receiver, &slow);
-  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
-
-  // Check if the object is a JS array or not.
-  Register instance_type = x10;
-  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
-  __ B(eq, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ Cmp(instance_type, JS_OBJECT_TYPE);
-  __ B(lo, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(hi, &fast_object);
-
-
-  __ Bind(&slow);
-  // Slow case, handle jump to runtime.
-  // Live values:
-  //  x0: value
-  //  x1: key
-  //  x2: receiver
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(x10, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ Mov(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
-                                                     x6, x7, x8);
-  // Cache miss.
-  __ B(&miss);
-
-  __ Bind(&extra);
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(ls, &slow);
-
-  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ B(eq, &fast_object_grow);
-  __ Cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ B(eq, &fast_double_grow);
-  __ B(&slow);
-
-
-  __ Bind(&array);
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
-  __ B(lo, &slow);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register value = StoreDescriptor::ValueRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register dictionary = x5;
-  DCHECK(!AreAliased(value, receiver, name,
-                     StoreWithVectorDescriptor::SlotRegister(),
-                     StoreWithVectorDescriptor::VectorRegister(), x5, x6, x7));
-
-  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x6, x7);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, x6, x7);
-  __ Ret();
-
-  // Cache miss: Jump to runtime.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, x6, x7);
-  GenerateMiss(masm);
-}
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -536,9 +59,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
-           static_cast<void*>(address), static_cast<void*>(info_address),
-           static_cast<void*>(info.SmiCheck()));
+    LOG(isolate, PatchIC(address, info_address, info.SmiCheckDelta()));
   }
 
   // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc
deleted file mode 100644
index c99c637..0000000
--- a/src/ic/arm64/ic-compiler-arm64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
-
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ Mov(x10, Smi::FromInt(language_mode));
-  __ Push(x10);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc
deleted file mode 100644
index 81c8207..0000000
--- a/src/ic/arm64/stub-cache-arm64.cc
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// Probe primary or secondary table.
-// If the entry is found in the cache, the generated code jump to the first
-// instruction of the stub in the cache.
-// If there is a miss the code fall trough.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register scratch3) {
-  // Some code below relies on the fact that the Entry struct contains
-  // 3 pointers (name, code, map).
-  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  Label miss;
-
-  DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
-  // Multiply by 3 because there are 3 fields per entry.
-  __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ Mov(scratch, key_offset);
-  __ Add(
-      scratch, scratch,
-      Operand(scratch3, LSL, kPointerSizeLog2 - StubCache::kCacheIndexShift));
-
-  // Check that the key in the entry matches the name.
-  __ Ldr(scratch2, MemOperand(scratch));
-  __ Cmp(name, scratch2);
-  __ B(ne, &miss);
-
-  // Check the map matches.
-  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
-  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Cmp(scratch2, scratch3);
-  __ B(ne, &miss);
-
-  // Get the code entry from the cache.
-  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ B(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ B(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(scratch);
-
-  // Miss: fall through.
-  __ Bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Make sure extra and extra2 registers are valid.
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Compute the hash for primary table.
-  __ Ldr(scratch.W(), FieldMemOperand(name, Name::kHashFieldOffset));
-  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Add(scratch, scratch, extra);
-  __ Eor(scratch, scratch, kPrimaryMagic);
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary table.
-  __ Sub(scratch, scratch, Operand(name));
-  __ Add(scratch, scratch, Operand(kSecondaryMagic));
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/call-optimization.cc b/src/ic/call-optimization.cc
index f7a1f69..6780ac4 100644
--- a/src/ic/call-optimization.cc
+++ b/src/ic/call-optimization.cc
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/ic/call-optimization.h"
-
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index 05e9031..6a9734d 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -24,60 +24,6 @@
   return handle(code);
 }
 
-
-Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
-    Handle<Name> name, Handle<Map> receiver_map) {
-  Isolate* isolate = name->GetIsolate();
-  if (receiver_map->prototype()->IsNull(isolate)) {
-    // TODO(jkummerow/verwaest): If there is no prototype and the property
-    // is nonexistent, introduce a builtin to handle this (fast properties
-    // -> return undefined, dictionary properties -> do negative lookup).
-    return Handle<Code>();
-  }
-  CacheHolderFlag flag;
-  Handle<Map> stub_holder_map =
-      IC::GetHandlerCacheHolder(receiver_map, false, isolate, &flag);
-
-  // If no dictionary mode objects are present in the prototype chain, the load
-  // nonexistent IC stub can be shared for all names for a given map and we use
-  // the empty string for the map cache in that case. If there are dictionary
-  // mode objects involved, we need to do negative lookups in the stub and
-  // therefore the stub will be specific to the name.
-  Handle<Name> cache_name =
-      receiver_map->is_dictionary_map()
-          ? name
-          : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
-  Handle<Map> current_map = stub_holder_map;
-  Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
-  while (true) {
-    if (current_map->is_dictionary_map()) cache_name = name;
-    if (current_map->prototype()->IsNull(isolate)) break;
-    if (name->IsPrivate()) {
-      // TODO(verwaest): Use nonexistent_private_symbol.
-      cache_name = name;
-      if (!current_map->has_hidden_prototype()) break;
-    }
-
-    last = handle(JSObject::cast(current_map->prototype()));
-    current_map = handle(last->map());
-  }
-  // Compile the stub that is either shared for all names or
-  // name specific if there are global objects involved.
-  Handle<Code> handler = PropertyHandlerCompiler::Find(
-      cache_name, stub_holder_map, Code::LOAD_IC, flag);
-  if (!handler.is_null()) {
-    TRACE_HANDLER_STATS(isolate, LoadIC_HandlerCacheHit_NonExistent);
-    return handler;
-  }
-
-  TRACE_HANDLER_STATS(isolate, LoadIC_LoadNonexistent);
-  NamedLoadHandlerCompiler compiler(isolate, receiver_map, last, flag);
-  handler = compiler.CompileLoadNonexistent(cache_name);
-  Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
-  return handler;
-}
-
-
 Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
                                               Handle<Name> name) {
   Code::Flags flags = Code::ComputeHandlerFlags(kind, cache_holder());
@@ -149,87 +95,6 @@
   return reg;
 }
 
-
-void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
-                                                        Label* miss,
-                                                        Register scratch1,
-                                                        Register scratch2) {
-  Register holder_reg;
-  Handle<Map> last_map;
-  if (holder().is_null()) {
-    holder_reg = receiver();
-    last_map = map();
-    // If |type| has null as its prototype, |holder()| is
-    // Handle<JSObject>::null().
-    DCHECK(last_map->prototype() == isolate()->heap()->null_value());
-  } else {
-    last_map = handle(holder()->map());
-    // This condition matches the branches below.
-    bool need_holder =
-        last_map->is_dictionary_map() && !last_map->IsJSGlobalObjectMap();
-    holder_reg =
-        FrontendHeader(receiver(), name, miss,
-                       need_holder ? RETURN_HOLDER : DONT_RETURN_ANYTHING);
-  }
-
-  if (last_map->is_dictionary_map()) {
-    if (last_map->IsJSGlobalObjectMap()) {
-      Handle<JSGlobalObject> global =
-          holder().is_null()
-              ? Handle<JSGlobalObject>::cast(isolate()->global_object())
-              : Handle<JSGlobalObject>::cast(holder());
-      GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
-    } else {
-      if (!name->IsUniqueName()) {
-        DCHECK(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      DCHECK(holder().is_null() ||
-             holder()->property_dictionary()->FindEntry(name) ==
-                 NameDictionary::kNotFound);
-      GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
-                                       scratch2);
-    }
-  }
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
-                                                        FieldIndex field) {
-  Register reg = Frontend(name);
-  __ Move(receiver(), reg);
-  LoadFieldStub stub(isolate(), field);
-  GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
-                                                           int constant_index) {
-  Register reg = Frontend(name);
-  __ Move(receiver(), reg);
-  LoadConstantStub stub(isolate(), constant_index);
-  GenerateTailCall(masm(), stub.GetCode());
-  return GetCode(kind(), name);
-}
-
-
-Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
-    Handle<Name> name) {
-  Label miss;
-  if (IC::ShouldPushPopSlotAndVector(kind())) {
-    DCHECK(kind() == Code::LOAD_IC);
-    PushVectorAndSlot();
-  }
-  NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
-  if (IC::ShouldPushPopSlotAndVector(kind())) {
-    DiscardVectorAndSlot();
-  }
-  GenerateLoadConstant(isolate()->factory()->undefined_value());
-  FrontendFooter(name, &miss);
-  return GetCode(kind(), name);
-}
-
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, Handle<AccessorInfo> callback, Handle<Code> slow_stub) {
   if (V8_UNLIKELY(FLAG_runtime_stats)) {
@@ -298,10 +163,13 @@
     case LookupIterator::NOT_FOUND:
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
       break;
-    case LookupIterator::DATA:
-      inline_followup =
-          it->property_details().type() == DATA && !it->is_dictionary_holder();
+    case LookupIterator::DATA: {
+      PropertyDetails details = it->property_details();
+      inline_followup = details.kind() == kData &&
+                        details.location() == kField &&
+                        !it->is_dictionary_holder();
       break;
+    }
     case LookupIterator::ACCESSOR: {
       Handle<Object> accessors = it->GetAccessors();
       if (accessors->IsAccessorInfo()) {
@@ -409,10 +277,13 @@
     case LookupIterator::TRANSITION:
       UNREACHABLE();
     case LookupIterator::DATA: {
-      DCHECK_EQ(DATA, it->property_details().type());
-      __ Move(receiver(), reg);
-      LoadFieldStub stub(isolate(), it->GetFieldIndex());
-      GenerateTailCall(masm(), stub.GetCode());
+      DCHECK_EQ(kData, it->property_details().kind());
+      DCHECK_EQ(kField, it->property_details().location());
+      __ Move(LoadFieldDescriptor::ReceiverRegister(), reg);
+      Handle<Object> smi_handler =
+          LoadIC::SimpleFieldLoad(isolate(), it->GetFieldIndex());
+      __ Move(LoadFieldDescriptor::SmiHandlerRegister(), smi_handler);
+      GenerateTailCall(masm(), isolate()->builtins()->LoadField());
       break;
     }
     case LookupIterator::ACCESSOR:
@@ -440,150 +311,6 @@
   return GetCode(kind(), name);
 }
 
-
-// TODO(verwaest): Cleanup. holder() is actually the receiver.
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
-    Handle<Map> transition, Handle<Name> name) {
-  Label miss;
-
-  // Ensure that the StoreTransitionStub we are going to call has the same
-  // number of stack arguments. This means that we don't have to adapt them
-  // if we decide to call the transition or miss stub.
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount ==
-                StoreTransitionDescriptor::kStackArgumentsCount);
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 0 ||
-                Descriptor::kStackArgumentsCount == 3);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kValue ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kValue);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kSlot ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kSlot);
-  STATIC_ASSERT(Descriptor::kParameterCount - Descriptor::kVector ==
-                StoreTransitionDescriptor::kParameterCount -
-                    StoreTransitionDescriptor::kVector);
-
-  if (Descriptor::kPassLastArgsOnStack) {
-    __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-  }
-
-  bool need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
-  if (need_save_restore) {
-    PushVectorAndSlot();
-  }
-
-  // Check that we are allowed to write this.
-  bool is_nonexistent = holder()->map() == transition->GetBackPointer();
-  if (is_nonexistent) {
-    // Find the top object.
-    Handle<JSObject> last;
-    PrototypeIterator::WhereToEnd end =
-        name->IsPrivate() ? PrototypeIterator::END_AT_NON_HIDDEN
-                          : PrototypeIterator::END_AT_NULL;
-    PrototypeIterator iter(isolate(), holder(), kStartAtPrototype, end);
-    while (!iter.IsAtEnd()) {
-      last = PrototypeIterator::GetCurrent<JSObject>(iter);
-      iter.Advance();
-    }
-    if (!last.is_null()) set_holder(last);
-    NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
-  } else {
-    FrontendHeader(receiver(), name, &miss, DONT_RETURN_ANYTHING);
-    DCHECK(holder()->HasFastProperties());
-  }
-
-  int descriptor = transition->LastAdded();
-  Handle<DescriptorArray> descriptors(transition->instance_descriptors());
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  DCHECK(!representation.IsNone());
-
-  // Stub is never generated for objects that require access checks.
-  DCHECK(!transition->is_access_check_needed());
-
-  // Call to respective StoreTransitionStub.
-  Register map_reg = StoreTransitionDescriptor::MapRegister();
-
-  if (details.type() == DATA_CONSTANT) {
-    DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
-    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
-    GenerateConstantCheck(map_reg, descriptor, value(), scratch1(), &miss);
-    if (need_save_restore) {
-      PopVectorAndSlot();
-    }
-    GenerateRestoreName(name);
-    StoreMapStub stub(isolate());
-    GenerateTailCall(masm(), stub.GetCode());
-
-  } else {
-    if (representation.IsHeapObject()) {
-      GenerateFieldTypeChecks(descriptors->GetFieldType(descriptor), value(),
-                              &miss);
-    }
-    StoreTransitionStub::StoreMode store_mode =
-        Map::cast(transition->GetBackPointer())->unused_property_fields() == 0
-            ? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
-            : StoreTransitionStub::StoreMapAndValue;
-    GenerateRestoreMap(transition, map_reg, scratch1(), &miss);
-    if (need_save_restore) {
-      PopVectorAndSlot();
-    }
-    // We need to pass name on the stack.
-    PopReturnAddress(this->name());
-    __ Push(name);
-    PushReturnAddress(this->name());
-
-    FieldIndex index = FieldIndex::ForDescriptor(*transition, descriptor);
-    __ Move(StoreNamedTransitionDescriptor::FieldOffsetRegister(),
-            Smi::FromInt(index.index() << kPointerSizeLog2));
-
-    StoreTransitionStub stub(isolate(), index.is_inobject(), representation,
-                             store_mode);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-
-  __ bind(&miss);
-  if (need_save_restore) {
-    PopVectorAndSlot();
-  }
-  GenerateRestoreName(name);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  return GetCode(kind(), name);
-}
-
-bool NamedStoreHandlerCompiler::RequiresFieldTypeChecks(
-    FieldType* field_type) const {
-  return field_type->IsClass();
-}
-
-
-Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
-  Label miss;
-  DCHECK(it->representation().IsHeapObject());
-
-  FieldType* field_type = *it->GetFieldType();
-  bool need_save_restore = false;
-  if (RequiresFieldTypeChecks(field_type)) {
-    need_save_restore = IC::ShouldPushPopSlotAndVector(kind());
-    if (Descriptor::kPassLastArgsOnStack) {
-      __ LoadParameterFromStack<Descriptor>(value(), Descriptor::kValue);
-    }
-    if (need_save_restore) PushVectorAndSlot();
-    GenerateFieldTypeChecks(field_type, value(), &miss);
-    if (need_save_restore) PopVectorAndSlot();
-  }
-
-  StoreFieldStub stub(isolate(), it->GetFieldIndex(), it->representation());
-  GenerateTailCall(masm(), stub.GetCode());
-
-  __ bind(&miss);
-  if (need_save_restore) PopVectorAndSlot();
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-  return GetCode(kind(), it->name());
-}
-
-
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
     Handle<JSObject> object, Handle<Name> name, int accessor_index,
     int expected_arguments) {
@@ -625,7 +352,7 @@
   }
   if (receiver_map->IsStringMap()) {
     TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadIndexedStringStub);
-    return LoadIndexedStringStub(isolate).GetCode();
+    return isolate->builtins()->KeyedLoadIC_IndexedString();
   }
   InstanceType instance_type = receiver_map->instance_type();
   if (instance_type < FIRST_JS_RECEIVER_TYPE) {
@@ -640,13 +367,8 @@
   }
   bool is_js_array = instance_type == JS_ARRAY_TYPE;
   if (elements_kind == DICTIONARY_ELEMENTS) {
-    if (FLAG_tf_load_ic_stub) {
-      TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
-      return LoadHandler::LoadElement(isolate, elements_kind, false,
-                                      is_js_array);
-    }
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadDictionaryElementStub);
-    return LoadDictionaryElementStub(isolate).GetCode();
+    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+    return LoadHandler::LoadElement(isolate, elements_kind, false, is_js_array);
   }
   DCHECK(IsFastElementsKind(elements_kind) ||
          IsFixedTypedArrayElementsKind(elements_kind));
@@ -654,16 +376,9 @@
   bool convert_hole_to_undefined =
       is_js_array && elements_kind == FAST_HOLEY_ELEMENTS &&
       *receiver_map == isolate->get_initial_js_array_map(elements_kind);
-  if (FLAG_tf_load_ic_stub) {
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
-    return LoadHandler::LoadElement(isolate, elements_kind,
-                                    convert_hole_to_undefined, is_js_array);
-  } else {
-    TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadFastElementStub);
-    return LoadFastElementStub(isolate, is_js_array, elements_kind,
-                               convert_hole_to_undefined)
-        .GetCode();
-  }
+  TRACE_HANDLER_STATS(isolate, KeyedLoadIC_LoadElementDH);
+  return LoadHandler::LoadElement(isolate, elements_kind,
+                                  convert_hole_to_undefined, is_js_array);
 }
 
 void ElementHandlerCompiler::CompileElementHandlers(
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
index 0dec36a..a37375a 100644
--- a/src/ic/handler-compiler.h
+++ b/src/ic/handler-compiler.h
@@ -40,8 +40,6 @@
   // Frontend loads from receiver(), returns holder register which may be
   // different.
   Register Frontend(Handle<Name> name);
-  void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
-                                 Register scratch1, Register scratch2);
 
   // When FLAG_vector_ics is true, handlers that have the possibility of missing
   // will need to save and pass these to miss handlers.
@@ -52,9 +50,6 @@
 
   void DiscardVectorAndSlot();
 
-  void PushReturnAddress(Register tmp);
-  void PopReturnAddress(Register tmp);
-
   // TODO(verwaest): Make non-static.
   static void GenerateApiAccessorCall(MacroAssembler* masm,
                                       const CallOptimization& optimization,
@@ -134,8 +129,6 @@
 
   virtual ~NamedLoadHandlerCompiler() {}
 
-  Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
-
   Handle<Code> CompileLoadCallback(Handle<Name> name,
                                    Handle<AccessorInfo> callback,
                                    Handle<Code> slow_stub);
@@ -144,8 +137,6 @@
                                    const CallOptimization& call_optimization,
                                    int accessor_index, Handle<Code> slow_stub);
 
-  Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
-
   // The LookupIterator is used to perform a lookup behind the interceptor. If
   // the iterator points to a LookupIterator::PROPERTY, its access will be
   // inlined.
@@ -157,10 +148,6 @@
   Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
                                  bool is_configurable);
 
-  // Static interface
-  static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
-                                             Handle<Map> map);
-
   static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<Map> map,
                                     Register receiver, Register holder,
                                     int accessor_index, int expected_arguments,
@@ -171,12 +158,6 @@
                           no_reg);
   }
 
-  static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                            Register receiver,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss_label);
-
   // These constants describe the structure of the interceptor arguments on the
   // stack. The arguments are pushed by the (platform-specific)
   // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
@@ -193,11 +174,7 @@
   virtual void FrontendFooter(Handle<Name> name, Label* miss);
 
  private:
-  Handle<Code> CompileLoadNonexistent(Handle<Name> name);
-  void GenerateLoadConstant(Handle<Object> value);
   void GenerateLoadCallback(Register reg, Handle<AccessorInfo> callback);
-  void GenerateLoadCallback(const CallOptimization& call_optimization,
-                            Handle<Map> receiver_map);
 
   // Helper emits no code if vector-ics are disabled.
   void InterceptorVectorSlotPush(Register holder_reg);
@@ -209,17 +186,6 @@
                                            Register holder_reg);
   void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
 
-  // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called. If the context
-  // has changed, a jump to miss is performed. This ties the generated
-  // code to a particular context and so must not be used in cases
-  // where the generated code is not allowed to have references to
-  // objects from a context.
-  static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                        int index,
-                                                        Register prototype,
-                                                        Label* miss);
-
   Register scratch3() { return registers_[4]; }
 };
 
@@ -244,9 +210,6 @@
 
   void ZapStackArgumentsRegisterAliases();
 
-  Handle<Code> CompileStoreTransition(Handle<Map> transition,
-                                      Handle<Name> name);
-  Handle<Code> CompileStoreField(LookupIterator* it);
   Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
                                     Handle<AccessorInfo> callback,
                                     LanguageMode language_mode);
@@ -275,18 +238,6 @@
   void GenerateRestoreName(Label* label, Handle<Name> name);
 
  private:
-  void GenerateRestoreName(Handle<Name> name);
-  void GenerateRestoreMap(Handle<Map> transition, Register map_reg,
-                          Register scratch, Label* miss);
-
-  void GenerateConstantCheck(Register map_reg, int descriptor,
-                             Register value_reg, Register scratch,
-                             Label* miss_label);
-
-  bool RequiresFieldTypeChecks(FieldType* field_type) const;
-  void GenerateFieldTypeChecks(FieldType* field_type, Register value_reg,
-                               Label* miss_label);
-
   static Register value();
 };
 
diff --git a/src/ic/handler-configuration-inl.h b/src/ic/handler-configuration-inl.h
index 505d67c..437c528 100644
--- a/src/ic/handler-configuration-inl.h
+++ b/src/ic/handler-configuration-inl.h
@@ -103,8 +103,10 @@
   }
   int value_index = DescriptorArray::ToValueIndex(descriptor);
 
-  DCHECK(kind == kStoreField || kind == kTransitionToField);
-  DCHECK_IMPLIES(kind == kStoreField, !extend_storage);
+  DCHECK(kind == kStoreField || kind == kTransitionToField ||
+         (kind == kStoreConstField && FLAG_track_constant_fields));
+  DCHECK_IMPLIES(extend_storage, kind == kTransitionToField);
+  DCHECK_IMPLIES(field_index.is_inobject(), !extend_storage);
 
   int config = StoreHandler::KindBits::encode(kind) |
                StoreHandler::ExtendStorageBits::encode(extend_storage) |
@@ -117,9 +119,12 @@
 
 Handle<Object> StoreHandler::StoreField(Isolate* isolate, int descriptor,
                                         FieldIndex field_index,
+                                        PropertyConstness constness,
                                         Representation representation) {
-  return StoreField(isolate, kStoreField, descriptor, field_index,
-                    representation, false);
+  DCHECK_IMPLIES(!FLAG_track_constant_fields, constness == kMutable);
+  Kind kind = constness == kMutable ? kStoreField : kStoreConstField;
+  return StoreField(isolate, kind, descriptor, field_index, representation,
+                    false);
 }
 
 Handle<Object> StoreHandler::TransitionToField(Isolate* isolate, int descriptor,
@@ -132,6 +137,7 @@
 
 Handle<Object> StoreHandler::TransitionToConstant(Isolate* isolate,
                                                   int descriptor) {
+  DCHECK(!FLAG_track_constant_fields);
   int value_index = DescriptorArray::ToValueIndex(descriptor);
   int config =
       StoreHandler::KindBits::encode(StoreHandler::kTransitionToConstant) |
diff --git a/src/ic/handler-configuration.h b/src/ic/handler-configuration.h
index a529173..539d448 100644
--- a/src/ic/handler-configuration.h
+++ b/src/ic/handler-configuration.h
@@ -121,8 +121,10 @@
   enum Kind {
     kStoreElement,
     kStoreField,
+    kStoreConstField,
     kTransitionToField,
-    kTransitionToConstant
+    // TODO(ishell): remove once constant field tracking is done.
+    kTransitionToConstant = kStoreConstField
   };
   class KindBits : public BitField<Kind, 0, 2> {};
 
@@ -175,6 +177,7 @@
   // Creates a Smi-handler for storing a field to fast object.
   static inline Handle<Object> StoreField(Isolate* isolate, int descriptor,
                                           FieldIndex field_index,
+                                          PropertyConstness constness,
                                           Representation representation);
 
   // Creates a Smi-handler for transitioning store to a field.
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
index 68fd1b9..f0f8fad 100644
--- a/src/ic/ia32/handler-compiler-ia32.cc
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -83,16 +83,6 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,27 +122,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadGlobalFunction(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ mov(result,
-         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  // TODO(mvstanton): This isn't used on ia32. Move all the other
-  // platform implementations into a code stub so this method can be removed.
-  UNREACHABLE();
-}
-
-
 // Generate call to api function.
 // This function uses push() to generate smaller, faster code than
 // the version above. It is an optimization that should will be removed
@@ -324,10 +293,12 @@
   }
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -335,15 +306,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -359,58 +322,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ and_(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ mov(scratch,
-         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -620,10 +523,26 @@
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
-  __ pop(scratch2());  // save old return address
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ push(scratch2());  // restore old return address
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ push(receiver());
+  __ push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ push(slot());
+    __ push(vector());
+  } else {
+    __ push(scratch3());  // slot
+    __ push(scratch2());  // vector
+  }
+  __ push(Operand(esp, 4 * kPointerSize));  // return address
+  __ mov(Operand(esp, 5 * kPointerSize), name());
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc
deleted file mode 100644
index a52f046..0000000
--- a/src/ic/ia32/ic-compiler-ia32.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
-  // ----------- S t a t e -------------
-  //  -- esp[12] : value
-  //  -- esp[8]  : slot
-  //  -- esp[4]  : vector
-  //  -- esp[0]  : return address
-  // -----------------------------------
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
-  __ mov(Operand(esp, 8), Descriptor::NameRegister());
-  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
-  __ pop(ebx);
-  __ push(Immediate(Smi::FromInt(language_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 44a5b9f..c4b4cdc 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -6,532 +6,11 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property eventhough it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register r0, Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  // key is a smi.
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store, could either Object or double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, key),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  // Update write barrier for the elements array address.
-  __ mov(edx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
-              &non_double_value, DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
-                                         edi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         ebx, edi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  // Return address is on the stack.
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register key = Descriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow);
-
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // Key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
-                                                     no_reg);
-
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // key is a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (key, receiver.length())
-  // do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  // key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = eax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                  JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
-         !edi.is(vector));
-
-  __ pop(edi);
-  __ push(receiver);
-  __ push(name);
-  __ push(slot);
-  __ push(vector);
-  __ push(edi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  // Current stack layout:
-  // - esp[12]   -- value
-  // - esp[8]    -- slot
-  // - esp[4]    -- vector
-  // - esp[0]    -- return address
-
-  Register return_address = StoreWithVectorDescriptor::SlotRegister();
-  __ pop(return_address);
-  __ push(receiver);
-  __ push(name);
-  __ push(return_address);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  Label restore_miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register name = Descriptor::NameRegister();
-  Register value = Descriptor::ValueRegister();
-  // Since the slot and vector values are passed on the stack we can use
-  // respective registers as scratch registers.
-  Register scratch1 = Descriptor::VectorRegister();
-  Register scratch2 = Descriptor::SlotRegister();
-
-  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
-  // A lot of registers are needed for storing to slow case objects.
-  // Push and restore receiver but rely on GenerateDictionaryStore preserving
-  // the value and name.
-  __ push(receiver);
-
-  Register dictionary = receiver;
-  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          scratch1, scratch2);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&restore_miss);
-  __ pop(receiver);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -582,9 +61,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 82700d3..0000000
--- a/src/ic/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register name, Register receiver,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register extra) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  Label miss;
-  Code::Kind ic_kind = stub_cache->ic_kind();
-  bool is_vector_store =
-      IC::ICUseVector(ic_kind) &&
-      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    if (is_vector_store) {
-      // The value, vector and slot were passed to the IC on the stack and
-      // they are still there. So we can just jump to the handler.
-      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    } else {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      __ pop(LoadWithVectorDescriptor::VectorRegister());
-      __ pop(LoadDescriptor::SlotRegister());
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    }
-
-    __ bind(&miss);
-  } else {
-    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    if (is_vector_store) {
-      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-    }
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Assert that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  __ sub(offset, name);
-  __ add(offset, Immediate(kSecondaryMagic));
-  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
deleted file mode 100644
index 750c88d..0000000
--- a/src/ic/ic-compiler.cc
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/ic/ic-compiler.h"
-
-#include "src/ic/handler-compiler.h"
-#include "src/ic/ic-inl.h"
-
-namespace v8 {
-namespace internal {
-
-Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
-  Isolate* isolate = receiver_map->GetIsolate();
-
-  DCHECK(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
-  PropertyICCompiler compiler(isolate);
-  Handle<Code> code =
-      compiler.CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-  return code;
-}
-
-void PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
-    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-    CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
-  Isolate* isolate = receiver_maps->at(0)->GetIsolate();
-  DCHECK(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-  PropertyICCompiler compiler(isolate);
-  compiler.CompileKeyedStorePolymorphicHandlers(
-      receiver_maps, transitioned_maps, handlers, store_mode);
-}
-
-
-void PropertyICCompiler::CompileKeyedStorePolymorphicHandlers(
-    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-    CodeHandleList* handlers, KeyedAccessStoreMode store_mode) {
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map(receiver_maps->at(i));
-    Handle<Code> cached_stub;
-    Handle<Map> transitioned_map;
-    {
-      Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
-      if (tmap != nullptr) transitioned_map = handle(tmap);
-    }
-
-    // TODO(mvstanton): The code below is doing pessimistic elements
-    // transitions. I would like to stop doing that and rely on Allocation Site
-    // Tracking to do a better job of ensuring the data types are what they need
-    // to be. Not all the elements are in place yet, pessimistic elements
-    // transitions are still important for performance.
-    if (!transitioned_map.is_null()) {
-      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-      ElementsKind elements_kind = receiver_map->elements_kind();
-      TRACE_HANDLER_STATS(isolate(),
-                          KeyedStoreIC_ElementsTransitionAndStoreStub);
-      cached_stub =
-          ElementsTransitionAndStoreStub(isolate(), elements_kind,
-                                         transitioned_map->elements_kind(),
-                                         is_js_array, store_mode).GetCode();
-    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
-      // TODO(mvstanton): Consider embedding store_mode in the state of the slow
-      // keyed store ic for uniformity.
-      TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
-      cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
-    } else {
-      cached_stub =
-          CompileKeyedStoreMonomorphicHandler(receiver_map, store_mode);
-    }
-    DCHECK(!cached_stub.is_null());
-    handlers->Add(cached_stub);
-    transitioned_maps->Add(transitioned_map);
-  }
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphicHandler(
-    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
-  ElementsKind elements_kind = receiver_map->elements_kind();
-  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub;
-  if (receiver_map->has_sloppy_arguments_elements()) {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
-    stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
-  } else if (receiver_map->has_fast_elements() ||
-             receiver_map->has_fixed_typed_array_elements()) {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
-    stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
-                                store_mode).GetCode();
-  } else {
-    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
-    stub = StoreElementStub(isolate(), elements_kind, store_mode).GetCode();
-  }
-  return stub;
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
deleted file mode 100644
index fa3ba15..0000000
--- a/src/ic/ic-compiler.h
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_IC_COMPILER_H_
-#define V8_IC_IC_COMPILER_H_
-
-#include "src/ic/access-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-
-class PropertyICCompiler : public PropertyAccessCompiler {
- public:
-  // Keyed
-  static Handle<Code> ComputeKeyedStoreMonomorphicHandler(
-      Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
-  static void ComputeKeyedStorePolymorphicHandlers(
-      MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
-      CodeHandleList* handlers, KeyedAccessStoreMode store_mode);
-
-  // Helpers
-  // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
-  // and make the helpers private.
-  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         LanguageMode language_mode);
-
-
- private:
-  explicit PropertyICCompiler(Isolate* isolate)
-      : PropertyAccessCompiler(isolate, Code::KEYED_STORE_IC,
-                               kCacheOnReceiver) {}
-
-  Handle<Code> CompileKeyedStoreMonomorphicHandler(
-      Handle<Map> receiver_map, KeyedAccessStoreMode store_mode);
-  void CompileKeyedStorePolymorphicHandlers(MapHandleList* receiver_maps,
-                                            MapHandleList* transitioned_maps,
-                                            CodeHandleList* handlers,
-                                            KeyedAccessStoreMode store_mode);
-};
-
-
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_IC_IC_COMPILER_H_
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
index 1b5d063..aacb690 100644
--- a/src/ic/ic-inl.h
+++ b/src/ic/ic-inl.h
@@ -7,6 +7,7 @@
 
 #include "src/ic/ic.h"
 
+#include "src/assembler-inl.h"
 #include "src/debug/debug.h"
 #include "src/macro-assembler.h"
 #include "src/prototype.h"
@@ -45,7 +46,10 @@
   // Convert target address to the code object. Code::GetCodeFromTargetAddress
   // is safe for use during GC where the map might be marked.
   Code* result = Code::GetCodeFromTargetAddress(target);
-  DCHECK(result->is_inline_cache_stub());
+  // The result can be an IC dispatcher (for vector-based ICs), an IC handler
+  // (for old-style patching ICs) or CEntryStub (for IC dispatchers inlined to
+  // bytecode handlers).
+  DCHECK(result->is_inline_cache_stub() || result->is_stub());
   return result;
 }
 
@@ -54,25 +58,13 @@
                             Address constant_pool) {
   if (AddressIsDeoptimizedCode(target->GetIsolate(), address)) return;
 
-  DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-
-  DCHECK(!target->is_inline_cache_stub() ||
-         (target->kind() != Code::LOAD_IC &&
-          target->kind() != Code::KEYED_LOAD_IC &&
-          target->kind() != Code::CALL_IC && target->kind() != Code::STORE_IC &&
-          target->kind() != Code::KEYED_STORE_IC));
+  // Only these three old-style ICs still do code patching.
+  DCHECK(target->is_binary_op_stub() || target->is_compare_ic_stub() ||
+         target->is_to_boolean_ic_stub());
 
   Heap* heap = target->GetHeap();
   Code* old_target = GetTargetAtAddress(address, constant_pool);
-#ifdef DEBUG
-  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
-  // ICs as language mode. The language mode of the IC must be preserved.
-  if (old_target->kind() == Code::STORE_IC ||
-      old_target->kind() == Code::KEYED_STORE_IC) {
-    DCHECK(StoreICState::GetLanguageMode(old_target->extra_ic_state()) ==
-           StoreICState::GetLanguageMode(target->extra_ic_state()));
-  }
-#endif
+
   Assembler::set_target_address_at(heap->isolate(), address, constant_pool,
                                    target->instruction_start());
   if (heap->gc_state() == Heap::MARK_COMPACT) {
@@ -93,8 +85,8 @@
 }
 
 bool IC::IsHandler(Object* object) {
-  return (object->IsSmi() && (object != nullptr)) || object->IsTuple3() ||
-         object->IsFixedArray() ||
+  return (object->IsSmi() && (object != nullptr)) || object->IsTuple2() ||
+         object->IsTuple3() || object->IsFixedArray() ||
          (object->IsCode() && Code::cast(object)->is_handler());
 }
 
@@ -132,14 +124,6 @@
 }
 
 
-Code* IC::get_host() {
-  return isolate()
-      ->inner_pointer_to_code_cache()
-      ->GetCacheEntry(address())
-      ->code;
-}
-
-
 bool IC::AddressIsDeoptimizedCode() const {
   return AddressIsDeoptimizedCode(isolate(), address());
 }
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
index f948036..a217b11 100644
--- a/src/ic/ic-state.cc
+++ b/src/ic/ic-state.cc
@@ -4,7 +4,10 @@
 
 #include "src/ic/ic-state.h"
 
+#include "src/ast/ast-types.h"
+#include "src/feedback-vector.h"
 #include "src/ic/ic.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -16,11 +19,6 @@
 }
 
 
-std::ostream& operator<<(std::ostream& os, const CallICState& s) {
-  return os << "(" << s.convert_mode() << ", " << s.tail_call_mode() << ")";
-}
-
-
 // static
 STATIC_CONST_MEMBER_DEFINITION const int BinaryOpICState::FIRST_TOKEN;
 
@@ -61,6 +59,23 @@
   return extra_ic_state;
 }
 
+std::string BinaryOpICState::ToString() const {
+  std::string ret = "(";
+  ret += Token::Name(op_);
+  if (CouldCreateAllocationMementos()) ret += "_CreateAllocationMementos";
+  ret += ":";
+  ret += BinaryOpICState::KindToString(left_kind_);
+  ret += "*";
+  if (fixed_right_arg_.IsJust()) {
+    ret += fixed_right_arg_.FromJust();
+  } else {
+    ret += BinaryOpICState::KindToString(right_kind_);
+  }
+  ret += "->";
+  ret += BinaryOpICState::KindToString(result_kind_);
+  ret += ")";
+  return ret;
+}
 
 // static
 void BinaryOpICState::GenerateAheadOfTime(
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
index 1ba37b9..16651c5 100644
--- a/src/ic/ic-state.h
+++ b/src/ic/ic-state.h
@@ -11,6 +11,7 @@
 namespace v8 {
 namespace internal {
 
+class AstType;
 
 const int kMaxKeyedPolymorphism = 4;
 
@@ -22,38 +23,6 @@
 };
 
 
-class CallICState final BASE_EMBEDDED {
- public:
-  explicit CallICState(ExtraICState extra_ic_state)
-      : bit_field_(extra_ic_state) {}
-  CallICState(ConvertReceiverMode convert_mode, TailCallMode tail_call_mode)
-      : bit_field_(ConvertModeBits::encode(convert_mode) |
-                   TailCallModeBits::encode(tail_call_mode)) {}
-
-  ExtraICState GetExtraICState() const { return bit_field_; }
-
-  static void GenerateAheadOfTime(Isolate*,
-                                  void (*Generate)(Isolate*,
-                                                   const CallICState&));
-
-  ConvertReceiverMode convert_mode() const {
-    return ConvertModeBits::decode(bit_field_);
-  }
-  TailCallMode tail_call_mode() const {
-    return TailCallModeBits::decode(bit_field_);
-  }
-
- private:
-  typedef BitField<ConvertReceiverMode, 0, 2> ConvertModeBits;
-  typedef BitField<TailCallMode, ConvertModeBits::kNext, 1> TailCallModeBits;
-
-  int const bit_field_;
-};
-
-
-std::ostream& operator<<(std::ostream& os, const CallICState& s);
-
-
 class BinaryOpICState final BASE_EMBEDDED {
  public:
   BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
@@ -82,6 +51,7 @@
   }
 
   ExtraICState GetExtraICState() const;
+  std::string ToString() const;
 
   static void GenerateAheadOfTime(Isolate*,
                                   void (*Generate)(Isolate*,
@@ -212,60 +182,6 @@
                            Handle<Object> y);
 };
 
-class LoadGlobalICState final BASE_EMBEDDED {
- private:
-  class TypeofModeBits : public BitField<TypeofMode, 0, 1> {};
-  STATIC_ASSERT(static_cast<int>(INSIDE_TYPEOF) == 0);
-  const ExtraICState state_;
-
- public:
-  static const uint32_t kNextBitFieldOffset = TypeofModeBits::kNext;
-
-  explicit LoadGlobalICState(ExtraICState extra_ic_state)
-      : state_(extra_ic_state) {}
-
-  explicit LoadGlobalICState(TypeofMode typeof_mode)
-      : state_(TypeofModeBits::encode(typeof_mode)) {}
-
-  ExtraICState GetExtraICState() const { return state_; }
-
-  TypeofMode typeof_mode() const { return TypeofModeBits::decode(state_); }
-
-  static TypeofMode GetTypeofMode(ExtraICState state) {
-    return LoadGlobalICState(state).typeof_mode();
-  }
-};
-
-
-class StoreICState final BASE_EMBEDDED {
- public:
-  explicit StoreICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
-
-  explicit StoreICState(LanguageMode mode)
-      : state_(LanguageModeState::encode(mode)) {}
-
-  ExtraICState GetExtraICState() const { return state_; }
-
-  LanguageMode language_mode() const {
-    return LanguageModeState::decode(state_);
-  }
-
-  static LanguageMode GetLanguageMode(ExtraICState state) {
-    return StoreICState(state).language_mode();
-  }
-
-  class LanguageModeState : public BitField<LanguageMode, 1, 1> {};
-  STATIC_ASSERT(i::LANGUAGE_END == 2);
-
-  // For convenience, a statically declared encoding of strict mode extra
-  // IC state.
-  static const ExtraICState kStrictModeState = STRICT
-                                               << LanguageModeState::kShift;
-
- private:
-  const ExtraICState state_;
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/ic/ic-stats.cc b/src/ic/ic-stats.cc
new file mode 100644
index 0000000..de2529f
--- /dev/null
+++ b/src/ic/ic-stats.cc
@@ -0,0 +1,144 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ic/ic-stats.h"
+
+#include "src/flags.h"
+#include "src/objects-inl.h"
+#include "src/tracing/trace-event.h"
+#include "src/tracing/traced-value.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+base::LazyInstance<ICStats>::type ICStats::instance_ =
+    LAZY_INSTANCE_INITIALIZER;
+
+ICStats::ICStats() : ic_infos_(MAX_IC_INFO), pos_(0) {
+  base::NoBarrier_Store(&enabled_, 0);
+}
+
+void ICStats::Begin() {
+  if (V8_LIKELY(!FLAG_ic_stats)) return;
+  base::NoBarrier_Store(&enabled_, 1);
+}
+
+void ICStats::End() {
+  if (base::NoBarrier_Load(&enabled_) != 1) return;
+  ++pos_;
+  if (pos_ == MAX_IC_INFO) {
+    Dump();
+  }
+  base::NoBarrier_Store(&enabled_, 0);
+}
+
+void ICStats::Reset() {
+  for (auto ic_info : ic_infos_) {
+    ic_info.Reset();
+  }
+  pos_ = 0;
+}
+
+void ICStats::Dump() {
+  auto value = v8::tracing::TracedValue::Create();
+  value->BeginArray("data");
+  for (int i = 0; i < pos_; ++i) {
+    ic_infos_[i].AppendToTracedValue(value.get());
+  }
+  value->EndArray();
+
+  TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats"), "V8.ICStats",
+                       TRACE_EVENT_SCOPE_THREAD, "ic-stats", std::move(value));
+  Reset();
+}
+
+const char* ICStats::GetOrCacheScriptName(Script* script) {
+  if (script_name_map_.find(script) != script_name_map_.end()) {
+    return script_name_map_[script].get();
+  }
+  Object* script_name_raw = script->name();
+  if (script_name_raw->IsString()) {
+    String* script_name = String::cast(script_name_raw);
+    char* c_script_name =
+        script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)
+            .release();
+    script_name_map_.insert(
+        std::make_pair(script, std::unique_ptr<char[]>(c_script_name)));
+    return c_script_name;
+  } else {
+    script_name_map_.insert(
+        std::make_pair(script, std::unique_ptr<char[]>(nullptr)));
+    return nullptr;
+  }
+  return nullptr;
+}
+
+const char* ICStats::GetOrCacheFunctionName(JSFunction* function) {
+  if (function_name_map_.find(function) != function_name_map_.end()) {
+    return function_name_map_[function].get();
+  }
+  SharedFunctionInfo* shared = function->shared();
+  ic_infos_[pos_].is_optimized = function->IsOptimized();
+  char* function_name = shared->DebugName()->ToCString().release();
+  function_name_map_.insert(
+      std::make_pair(function, std::unique_ptr<char[]>(function_name)));
+  return function_name;
+}
+
+ICInfo::ICInfo()
+    : function_name(nullptr),
+      script_offset(0),
+      script_name(nullptr),
+      line_num(-1),
+      is_constructor(false),
+      is_optimized(false),
+      map(nullptr),
+      is_dictionary_map(0),
+      number_of_own_descriptors(0) {}
+
+void ICInfo::Reset() {
+  type.clear();
+  function_name = nullptr;
+  script_offset = 0;
+  script_name = nullptr;
+  line_num = -1;
+  is_constructor = false;
+  is_optimized = false;
+  state.clear();
+  map = nullptr;
+  is_dictionary_map = false;
+  number_of_own_descriptors = 0;
+  instance_type.clear();
+}
+
+void ICInfo::AppendToTracedValue(v8::tracing::TracedValue* value) const {
+  value->BeginDictionary();
+  value->SetString("type", type);
+  if (function_name) {
+    value->SetString("functionName", function_name);
+    if (is_optimized) {
+      value->SetInteger("optimized", is_optimized);
+    }
+  }
+  if (script_offset) value->SetInteger("offset", script_offset);
+  if (script_name) value->SetString("scriptName", script_name);
+  if (line_num != -1) value->SetInteger("lineNum", line_num);
+  if (is_constructor) value->SetInteger("constructor", is_constructor);
+  if (!state.empty()) value->SetString("state", state);
+  if (map) {
+    // V8 cannot represent integer above 2^53 - 1 in JavaScript from JSON,
+    // thus `map` should be converted to a string rather than an integer.
+    std::stringstream ss;
+    ss << map;
+    value->SetString("map", ss.str());
+  }
+  if (map) value->SetInteger("dict", is_dictionary_map);
+  if (map) value->SetInteger("own", number_of_own_descriptors);
+  if (!instance_type.empty()) value->SetString("instanceType", instance_type);
+  value->EndDictionary();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/ic/ic-stats.h b/src/ic/ic-stats.h
new file mode 100644
index 0000000..a3015d0
--- /dev/null
+++ b/src/ic/ic-stats.h
@@ -0,0 +1,77 @@
+// Copyright 2016 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_STATS_H_
+#define V8_IC_IC_STATS_H_
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "src/base/atomicops.h"
+#include "src/base/lazy-instance.h"
+
+namespace v8 {
+
+namespace tracing {
+class TracedValue;
+}
+
+namespace internal {
+
+class JSFunction;
+class Script;
+
+struct ICInfo {
+  ICInfo();
+  void Reset();
+  void AppendToTracedValue(v8::tracing::TracedValue* value) const;
+  std::string type;
+  const char* function_name;
+  int script_offset;
+  const char* script_name;
+  int line_num;
+  bool is_constructor;
+  bool is_optimized;
+  std::string state;
+  // Address of the map.
+  void* map;
+  // Whether map is a dictionary map.
+  bool is_dictionary_map;
+  // Number of own descriptors.
+  unsigned number_of_own_descriptors;
+  std::string instance_type;
+};
+
+class ICStats {
+ public:
+  const int MAX_IC_INFO = 4096;
+
+  ICStats();
+  void Dump();
+  void Begin();
+  void End();
+  void Reset();
+  V8_INLINE ICInfo& Current() {
+    DCHECK(pos_ >= 0 && pos_ < MAX_IC_INFO);
+    return ic_infos_[pos_];
+  }
+  const char* GetOrCacheScriptName(Script* script);
+  const char* GetOrCacheFunctionName(JSFunction* function);
+  V8_INLINE static ICStats* instance() { return instance_.Pointer(); }
+
+ private:
+  static base::LazyInstance<ICStats>::type instance_;
+  base::Atomic32 enabled_;
+  std::vector<ICInfo> ic_infos_;
+  std::unordered_map<Script*, std::unique_ptr<char[]>> script_name_map_;
+  std::unordered_map<JSFunction*, std::unique_ptr<char[]>> function_name_map_;
+  int pos_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_IC_IC_STATS_H_
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index 7e0cefd..f11f94a 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -19,8 +19,8 @@
 #include "src/ic/call-optimization.h"
 #include "src/ic/handler-compiler.h"
 #include "src/ic/handler-configuration-inl.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/ic-inl.h"
+#include "src/ic/ic-stats.h"
 #include "src/ic/stub-cache.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
@@ -29,6 +29,7 @@
 #include "src/runtime/runtime-utils.h"
 #include "src/runtime/runtime.h"
 #include "src/tracing/trace-event.h"
+#include "src/tracing/tracing-category-observer.h"
 
 namespace v8 {
 namespace internal {
@@ -64,33 +65,10 @@
   return "";
 }
 
-
-#ifdef DEBUG
-
-#define TRACE_GENERIC_IC(isolate, type, reason)                \
-  do {                                                         \
-    if (FLAG_trace_ic) {                                       \
-      PrintF("[%s patching generic stub in ", type);           \
-      JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
-      PrintF(" (%s)]\n", reason);                              \
-    }                                                          \
-  } while (false)
-
-#else
-
-#define TRACE_GENERIC_IC(isolate, type, reason)      \
-  do {                                               \
-    if (FLAG_trace_ic) {                             \
-      PrintF("[%s patching generic stub in ", type); \
-      PrintF("(see below) (%s)]\n", reason);         \
-    }                                                \
-  } while (false)
-
-#endif  // DEBUG
-
+#define TRACE_GENERIC_IC(reason) set_slow_stub_reason(reason);
 
 void IC::TraceIC(const char* type, Handle<Object> name) {
-  if (FLAG_trace_ic) {
+  if (FLAG_ic_stats) {
     if (AddressIsDeoptimizedCode()) return;
     DCHECK(UseVector());
     State new_state = nexus()->StateFromFeedback();
@@ -98,64 +76,110 @@
   }
 }
 
+Address IC::GetAbstractPC(int* line, int* column) const {
+  JavaScriptFrameIterator it(isolate());
+
+  JavaScriptFrame* frame = it.frame();
+  DCHECK(!frame->is_builtin());
+  int position = frame->position();
+
+  Object* maybe_script = frame->function()->shared()->script();
+  if (maybe_script->IsScript()) {
+    Handle<Script> script(Script::cast(maybe_script), isolate());
+    Script::PositionInfo info;
+    Script::GetPositionInfo(script, position, &info, Script::WITH_OFFSET);
+    *line = info.line + 1;
+    *column = info.column + 1;
+  } else {
+    *line = position;
+    *column = -1;
+  }
+
+  if (frame->is_interpreted()) {
+    InterpretedFrame* iframe = static_cast<InterpretedFrame*>(frame);
+    Address bytecode_start =
+        reinterpret_cast<Address>(iframe->GetBytecodeArray()) - kHeapObjectTag +
+        BytecodeArray::kHeaderSize;
+    return bytecode_start + iframe->GetBytecodeOffset();
+  }
+
+  return frame->pc();
+}
 
 void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
                  State new_state) {
-  if (!FLAG_trace_ic) return;
-  PrintF("[%s%s in ", is_keyed() ? "Keyed" : "", type);
+  if (V8_LIKELY(!FLAG_ic_stats)) return;
 
-  // TODO(jkummerow): Add support for "apply". The logic is roughly:
-  // marker = [fp_ + kMarkerOffset];
-  // if marker is smi and marker.value == INTERNAL and
-  //     the frame's code == builtin(Builtins::kFunctionApply):
-  // then print "apply from" and advance one frame
-
-  Object* maybe_function =
-      Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
-  if (maybe_function->IsJSFunction()) {
-    JSFunction* function = JSFunction::cast(maybe_function);
-    int code_offset = 0;
-    if (function->IsInterpreted()) {
-      code_offset = InterpretedFrame::GetBytecodeOffset(fp());
-    } else {
-      code_offset =
-          static_cast<int>(pc() - function->code()->instruction_start());
-    }
-    JavaScriptFrame::PrintFunctionAndOffset(function, function->abstract_code(),
-                                            code_offset, stdout, true);
-  }
-
-  const char* modifier = "";
-  if (kind() == Code::KEYED_STORE_IC) {
-    KeyedAccessStoreMode mode =
-        casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
-    modifier = GetTransitionMarkModifier(mode);
-  }
   Map* map = nullptr;
   if (!receiver_map().is_null()) {
     map = *receiver_map();
   }
-  PrintF(" (%c->%c%s) map=(%p", TransitionMarkFromState(old_state),
-         TransitionMarkFromState(new_state), modifier,
-         reinterpret_cast<void*>(map));
-  if (map != nullptr) {
-    PrintF(" dict=%u own=%u type=", map->is_dictionary_map(),
-           map->NumberOfOwnDescriptors());
-    std::cout << map->instance_type();
+
+  const char* modifier = "";
+  if (IsKeyedStoreIC()) {
+    KeyedAccessStoreMode mode =
+        casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
+    modifier = GetTransitionMarkModifier(mode);
   }
-  PrintF(") ");
-  name->ShortPrint(stdout);
-  PrintF("]\n");
+
+  if (!(FLAG_ic_stats &
+        v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(), ICEvent(type, is_keyed(), pc, line, column, map, *name,
+                           TransitionMarkFromState(old_state),
+                           TransitionMarkFromState(new_state), modifier,
+                           slow_stub_reason_));
+    return;
+  }
+
+  ICStats::instance()->Begin();
+  ICInfo& ic_info = ICStats::instance()->Current();
+  ic_info.type = is_keyed() ? "Keyed" : "";
+  ic_info.type += type;
+
+  Object* maybe_function =
+      Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+  DCHECK(maybe_function->IsJSFunction());
+  JSFunction* function = JSFunction::cast(maybe_function);
+  int code_offset = 0;
+  if (function->IsInterpreted()) {
+    code_offset = InterpretedFrame::GetBytecodeOffset(fp());
+  } else {
+    code_offset =
+        static_cast<int>(pc() - function->code()->instruction_start());
+  }
+  JavaScriptFrame::CollectFunctionAndOffsetForICStats(
+      function, function->abstract_code(), code_offset);
+
+  // Reserve enough space for IC transition state, the longest length is 17.
+  ic_info.state.reserve(17);
+  ic_info.state = "(";
+  ic_info.state += TransitionMarkFromState(old_state);
+  ic_info.state += "->";
+  ic_info.state += TransitionMarkFromState(new_state);
+  ic_info.state += modifier;
+  ic_info.state += ")";
+  ic_info.map = reinterpret_cast<void*>(map);
+  if (map != nullptr) {
+    ic_info.is_dictionary_map = map->is_dictionary_map();
+    ic_info.number_of_own_descriptors = map->NumberOfOwnDescriptors();
+    ic_info.instance_type = std::to_string(map->instance_type());
+  }
+  // TODO(lpy) Add name as key field in ICStats.
+  ICStats::instance()->End();
 }
 
 
 #define TRACE_IC(type, name) TraceIC(type, name)
 
-
 IC::IC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus)
     : isolate_(isolate),
       vector_set_(false),
+      kind_(FeedbackSlotKind::kInvalid),
       target_maps_set_(false),
+      slow_stub_reason_(nullptr),
       nexus_(nexus) {
   // To improve the performance of the (much used) IC code, we unfold a few
   // levels of the stack frame iteration code. This yields a ~35% speedup when
@@ -192,9 +216,9 @@
   // function's frame. Check if the there is an additional frame, and if there
   // is skip this frame. However, the pc should not be updated. The call to
   // ICs happen from bytecode handlers.
-  Object* frame_type =
-      Memory::Object_at(fp + TypedFrameConstants::kFrameTypeOffset);
-  if (frame_type == Smi::FromInt(StackFrame::STUB)) {
+  intptr_t frame_marker =
+      Memory::intptr_at(fp + TypedFrameConstants::kFrameTypeOffset);
+  if (frame_marker == StackFrame::TypeToMarker(StackFrame::STUB)) {
     fp = Memory::Address_at(fp + TypedFrameConstants::kCallerFPOffset);
   }
   fp_ = fp;
@@ -202,18 +226,36 @@
     constant_pool_address_ = constant_pool;
   }
   pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
-  Code* target = this->target();
-  kind_ = target->kind();
-  state_ = UseVector() ? nexus->StateFromFeedback() : StateFromCode(target);
+  if (nexus) {
+    kind_ = nexus->kind();
+    DCHECK(UseVector());
+    state_ = nexus->StateFromFeedback();
+    extra_ic_state_ = kNoExtraICState;
+  } else {
+    Code* target = this->target();
+    Code::Kind kind = target->kind();
+    if (kind == Code::BINARY_OP_IC) {
+      kind_ = FeedbackSlotKind::kBinaryOp;
+    } else if (kind == Code::COMPARE_IC) {
+      kind_ = FeedbackSlotKind::kCompareOp;
+    } else if (kind == Code::TO_BOOLEAN_IC) {
+      kind_ = FeedbackSlotKind::kToBoolean;
+    } else {
+      UNREACHABLE();
+      kind_ = FeedbackSlotKind::kInvalid;
+    }
+    DCHECK(!UseVector());
+    state_ = StateFromCode(target);
+    extra_ic_state_ = target->extra_ic_state();
+  }
   old_state_ = state_;
-  extra_ic_state_ = target->extra_ic_state();
 }
 
 // The ICs that don't pass slot and vector through the stack have to
 // save/restore them in the dispatcher.
 bool IC::ShouldPushPopSlotAndVector(Code::Kind kind) {
   if (kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
-      kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC) {
+      kind == Code::KEYED_LOAD_IC) {
     return true;
   }
   if (kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC) {
@@ -244,7 +286,7 @@
   }
 }
 
-SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
+JSFunction* IC::GetHostFunction() const {
   // Compute the JavaScript frame for the frame pointer of this IC
   // structure. We need this to be able to find the function
   // corresponding to the frame.
@@ -253,16 +295,7 @@
   JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
   // Find the function on the stack and both the active code for the
   // function and the original code.
-  JSFunction* function = frame->function();
-  return function->shared();
-}
-
-
-Code* IC::GetCode() const {
-  HandleScope scope(isolate());
-  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
-  Code* code = shared->code();
-  return code;
+  return frame->function();
 }
 
 static void LookupForRead(LookupIterator* it) {
@@ -305,7 +338,7 @@
 
   // This is a contextual access, always just update the handler and stay
   // monomorphic.
-  if (kind() == Code::LOAD_GLOBAL_IC) return true;
+  if (IsLoadGlobalIC()) return true;
 
   // The current map wasn't handled yet. There's no reason to stay monomorphic,
   // *unless* we're moving from a deprecated map to its replacement, or
@@ -342,7 +375,7 @@
   update_receiver_map(receiver);
   if (!name->IsString()) return;
   if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
-  if (receiver->IsUndefined(isolate()) || receiver->IsNull(isolate())) return;
+  if (receiver->IsNullOrUndefined(isolate())) return;
 
   // Remove the target from the code cache if it became invalid
   // because of changes in the prototype chain to avoid hitting it
@@ -402,12 +435,14 @@
 }
 
 // static
-void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host) {
-  if (host->kind() != Code::FUNCTION) return;
+void IC::OnFeedbackChanged(Isolate* isolate, JSFunction* host_function) {
+  Code* host = host_function->shared()->code();
 
-  TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
-  info->change_own_type_change_checksum();
-  host->set_profiler_ticks(0);
+  if (host->kind() == Code::FUNCTION) {
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+    info->change_own_type_change_checksum();
+    host->set_profiler_ticks(0);
+  }
   isolate->runtime_profiler()->NotifyICChanged();
   // TODO(2029): When an optimized function is patched, it would
   // be nice to propagate the corresponding type information to its
@@ -417,6 +452,7 @@
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
   // Type vector based ICs update these statistics at a different time because
   // they don't always patch on state change.
+  // TODO(ishell): DCHECK
   if (ICUseVector(target->kind())) return;
 
   DCHECK(old_target->is_inline_cache_stub());
@@ -462,58 +498,6 @@
   }
 }
 
-
-void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  // Make sure to also clear the map used in inline fast cases.  If we
-  // do not clear these maps, cached code can keep objects alive
-  // through the embedded maps.
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
-  // Determine our state.
-  Object* feedback = nexus->vector()->Get(nexus->slot());
-  State state = nexus->StateFromFeedback();
-
-  if (state != UNINITIALIZED && !feedback->IsAllocationSite()) {
-    nexus->ConfigureUninitialized();
-    // The change in state must be processed.
-    OnTypeFeedbackChanged(isolate, host);
-  }
-}
-
-
-void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-void LoadGlobalIC::Clear(Isolate* isolate, Code* host,
-                         LoadGlobalICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigureUninitialized();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-void StoreIC::Clear(Isolate* isolate, Code* host, StoreICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
-void KeyedStoreIC::Clear(Isolate* isolate, Code* host,
-                         KeyedStoreICNexus* nexus) {
-  if (IsCleared(nexus)) return;
-  nexus->ConfigurePremonomorphic();
-  OnTypeFeedbackChanged(isolate, host);
-}
-
-
 void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
                       Address constant_pool) {
   DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
@@ -538,13 +522,13 @@
   if (new_state == PREMONOMORPHIC) {
     nexus()->ConfigurePremonomorphic();
   } else if (new_state == MEGAMORPHIC) {
-    if (kind() == Code::LOAD_IC || kind() == Code::STORE_IC) {
+    if (IsLoadIC() || IsStoreIC() || IsStoreOwnIC()) {
       nexus()->ConfigureMegamorphic();
-    } else if (kind() == Code::KEYED_LOAD_IC) {
+    } else if (IsKeyedLoadIC()) {
       KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
       nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
     } else {
-      DCHECK(kind() == Code::KEYED_STORE_IC);
+      DCHECK(IsKeyedStoreIC());
       KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
       nexus->ConfigureMegamorphicKeyed(key->IsName() ? PROPERTY : ELEMENT);
     }
@@ -553,74 +537,131 @@
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 void IC::ConfigureVectorState(Handle<Name> name, Handle<Map> map,
                               Handle<Object> handler) {
   DCHECK(UseVector());
-  if (kind() == Code::LOAD_IC) {
-    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
-    nexus->ConfigureMonomorphic(map, handler);
-  } else if (kind() == Code::LOAD_GLOBAL_IC) {
-    LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
-    nexus->ConfigureHandlerMode(Handle<Code>::cast(handler));
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
-    nexus->ConfigureMonomorphic(name, map, handler);
-  } else if (kind() == Code::STORE_IC) {
-    StoreICNexus* nexus = casted_nexus<StoreICNexus>();
-    nexus->ConfigureMonomorphic(map, handler);
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
-    nexus->ConfigureMonomorphic(name, map, handler);
+  switch (kind_) {
+    case FeedbackSlotKind::kLoadProperty: {
+      LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+      nexus->ConfigureMonomorphic(map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof: {
+      LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
+      nexus->ConfigureHandlerMode(handler);
+      break;
+    }
+    case FeedbackSlotKind::kLoadKeyed: {
+      KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+      nexus->ConfigureMonomorphic(name, map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kStoreNamedSloppy:
+    case FeedbackSlotKind::kStoreNamedStrict:
+    case FeedbackSlotKind::kStoreOwnNamed: {
+      StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+      nexus->ConfigureMonomorphic(map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+    case FeedbackSlotKind::kStoreKeyedStrict: {
+      KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+      nexus->ConfigureMonomorphic(name, map, handler);
+      break;
+    }
+    case FeedbackSlotKind::kCall:
+    case FeedbackSlotKind::kBinaryOp:
+    case FeedbackSlotKind::kCompareOp:
+    case FeedbackSlotKind::kToBoolean:
+    case FeedbackSlotKind::kCreateClosure:
+    case FeedbackSlotKind::kLiteral:
+    case FeedbackSlotKind::kGeneral:
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+    case FeedbackSlotKind::kInvalid:
+    case FeedbackSlotKind::kKindsNumber:
+      UNREACHABLE();
+      break;
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 void IC::ConfigureVectorState(Handle<Name> name, MapHandleList* maps,
                               List<Handle<Object>>* handlers) {
   DCHECK(UseVector());
-  if (kind() == Code::LOAD_IC) {
-    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
-    nexus->ConfigurePolymorphic(maps, handlers);
-  } else if (kind() == Code::KEYED_LOAD_IC) {
-    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
-    nexus->ConfigurePolymorphic(name, maps, handlers);
-  } else if (kind() == Code::STORE_IC) {
-    StoreICNexus* nexus = casted_nexus<StoreICNexus>();
-    nexus->ConfigurePolymorphic(maps, handlers);
-  } else {
-    DCHECK(kind() == Code::KEYED_STORE_IC);
-    KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
-    nexus->ConfigurePolymorphic(name, maps, handlers);
+  switch (kind_) {
+    case FeedbackSlotKind::kLoadProperty: {
+      LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+      nexus->ConfigurePolymorphic(maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kLoadKeyed: {
+      KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+      nexus->ConfigurePolymorphic(name, maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kStoreNamedSloppy:
+    case FeedbackSlotKind::kStoreNamedStrict:
+    case FeedbackSlotKind::kStoreOwnNamed: {
+      StoreICNexus* nexus = casted_nexus<StoreICNexus>();
+      nexus->ConfigurePolymorphic(maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kStoreKeyedSloppy:
+    case FeedbackSlotKind::kStoreKeyedStrict: {
+      KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
+      nexus->ConfigurePolymorphic(name, maps, handlers);
+      break;
+    }
+    case FeedbackSlotKind::kCall:
+    case FeedbackSlotKind::kLoadGlobalNotInsideTypeof:
+    case FeedbackSlotKind::kLoadGlobalInsideTypeof:
+    case FeedbackSlotKind::kBinaryOp:
+    case FeedbackSlotKind::kCompareOp:
+    case FeedbackSlotKind::kToBoolean:
+    case FeedbackSlotKind::kCreateClosure:
+    case FeedbackSlotKind::kLiteral:
+    case FeedbackSlotKind::kGeneral:
+    case FeedbackSlotKind::kStoreDataPropertyInLiteral:
+    case FeedbackSlotKind::kInvalid:
+    case FeedbackSlotKind::kKindsNumber:
+      UNREACHABLE();
+      break;
   }
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
-
 void IC::ConfigureVectorState(MapHandleList* maps,
                               MapHandleList* transitioned_maps,
-                              CodeHandleList* handlers) {
+                              List<Handle<Object>>* handlers) {
   DCHECK(UseVector());
-  DCHECK(kind() == Code::KEYED_STORE_IC);
+  DCHECK(IsKeyedStoreIC());
   KeyedStoreICNexus* nexus = casted_nexus<KeyedStoreICNexus>();
   nexus->ConfigurePolymorphic(maps, transitioned_maps, handlers);
 
   vector_set_ = true;
-  OnTypeFeedbackChanged(isolate(), get_host());
+  OnFeedbackChanged(isolate(), GetHostFunction());
 }
 
 
 MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
-  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+  if (object->IsNullOrUndefined(isolate())) {
+    if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+      // Ensure the IC state progresses.
+      TRACE_HANDLER_STATS(isolate(), LoadIC_NonReceiver);
+      update_receiver_map(object);
+      PatchCache(name, slow_stub());
+      TRACE_IC("LoadIC", name);
+    }
     return TypeError(MessageTemplate::kNonObjectPropertyLoad, object, name);
   }
 
@@ -792,11 +833,8 @@
 
 void IC::PatchCache(Handle<Name> name, Handle<Object> handler) {
   DCHECK(IsHandler(*handler));
-  // Currently only LoadIC and KeyedLoadIC support non-code handlers.
-  DCHECK_IMPLIES(!handler->IsCode(), kind() == Code::LOAD_IC ||
-                                         kind() == Code::KEYED_LOAD_IC ||
-                                         kind() == Code::STORE_IC ||
-                                         kind() == Code::KEYED_STORE_IC);
+  // Currently only load and store ICs support non-code handlers.
+  DCHECK_IMPLIES(!handler->IsCode(), IsAnyLoad() || IsAnyStore());
   switch (state()) {
     case UNINITIALIZED:
     case PREMONOMORPHIC:
@@ -804,7 +842,7 @@
       break;
     case RECOMPUTE_HANDLER:
     case MONOMORPHIC:
-      if (kind() == Code::LOAD_GLOBAL_IC) {
+      if (IsLoadGlobalIC()) {
         UpdateMonomorphicIC(handler, name);
         break;
       }
@@ -831,23 +869,9 @@
   }
 }
 
-Handle<Code> KeyedStoreIC::ChooseMegamorphicStub(Isolate* isolate,
-                                                 ExtraICState extra_state) {
-  DCHECK(!FLAG_tf_store_ic_stub);
-  LanguageMode mode = StoreICState::GetLanguageMode(extra_state);
-  return is_strict(mode)
-             ? isolate->builtins()->KeyedStoreIC_Megamorphic_Strict()
-             : isolate->builtins()->KeyedStoreIC_Megamorphic();
-}
-
-Handle<Object> LoadIC::SimpleFieldLoad(FieldIndex index) {
-  if (FLAG_tf_load_ic_stub) {
-    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldDH);
-    return LoadHandler::LoadField(isolate(), index);
-  }
-  TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldStub);
-  LoadFieldStub stub(isolate(), index);
-  return stub.GetCode();
+Handle<Object> LoadIC::SimpleFieldLoad(Isolate* isolate, FieldIndex index) {
+  TRACE_HANDLER_STATS(isolate, LoadIC_LoadFieldDH);
+  return LoadHandler::LoadField(isolate, index);
 }
 
 namespace {
@@ -1044,7 +1068,7 @@
     if (holder->HasFastProperties()) {
       if (getter->IsJSFunction()) {
         Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
-        if (!receiver->IsJSObject() && !function->shared()->IsBuiltin() &&
+        if (!receiver->IsJSObject() && function->shared()->IsUserJavaScript() &&
             is_sloppy(function->shared()->language_mode())) {
           // Calling sloppy non-builtins with a value as the receiver
           // requires boxing.
@@ -1063,7 +1087,7 @@
 
 
 void LoadIC::UpdateCaches(LookupIterator* lookup) {
-  if (state() == UNINITIALIZED && kind() != Code::LOAD_GLOBAL_IC) {
+  if (state() == UNINITIALIZED && !IsLoadGlobalIC()) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
     TRACE_HANDLER_STATS(isolate(), LoadIC_Premonomorphic);
@@ -1077,26 +1101,12 @@
       lookup->state() == LookupIterator::ACCESS_CHECK) {
     code = slow_stub();
   } else if (!lookup->IsFound()) {
-    if (kind() == Code::LOAD_IC) {
-      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
-      code = LoadNonExistent(receiver_map(), lookup->name());
-    } else if (kind() == Code::LOAD_GLOBAL_IC) {
-      code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
-                                                              receiver_map());
-      // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
-      if (code.is_null()) code = slow_stub();
-    } else {
-      code = slow_stub();
-    }
+    TRACE_HANDLER_STATS(isolate(), LoadIC_LoadNonexistentDH);
+    code = LoadNonExistent(receiver_map(), lookup->name());
   } else {
-    if (kind() == Code::LOAD_GLOBAL_IC &&
-        lookup->state() == LookupIterator::DATA &&
-        lookup->GetHolder<Object>()->IsJSGlobalObject()) {
-#if DEBUG
-      Handle<Object> holder = lookup->GetHolder<Object>();
-      Handle<Object> receiver = lookup->GetReceiver();
-      DCHECK_EQ(*receiver, *holder);
-#endif
+    if (IsLoadGlobalIC() && lookup->state() == LookupIterator::DATA &&
+        lookup->GetReceiver().is_identical_to(lookup->GetHolder<Object>())) {
+      DCHECK(lookup->GetReceiver()->IsJSGlobalObject());
       // Now update the cell in the feedback vector.
       LoadGlobalICNexus* nexus = casted_nexus<LoadGlobalICNexus>();
       nexus->ConfigurePropertyCellMode(lookup->GetPropertyCell());
@@ -1104,26 +1114,19 @@
       return;
     } else if (lookup->state() == LookupIterator::ACCESSOR) {
       if (!IsCompatibleReceiver(lookup, receiver_map())) {
-        TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
+        TRACE_GENERIC_IC("incompatible receiver type");
         code = slow_stub();
       }
     } else if (lookup->state() == LookupIterator::INTERCEPTOR) {
-      if (kind() == Code::LOAD_GLOBAL_IC) {
-        // The interceptor handler requires name but it is not passed explicitly
-        // to LoadGlobalIC and the LoadGlobalIC dispatcher also does not load
-        // it so we will just use slow stub.
+      // Perform a lookup behind the interceptor. Copy the LookupIterator
+      // since the original iterator will be used to fetch the value.
+      LookupIterator it = *lookup;
+      it.Next();
+      LookupForRead(&it);
+      if (it.state() == LookupIterator::ACCESSOR &&
+          !IsCompatibleReceiver(&it, receiver_map())) {
+        TRACE_GENERIC_IC("incompatible receiver type");
         code = slow_stub();
-      } else {
-        // Perform a lookup behind the interceptor. Copy the LookupIterator
-        // since the original iterator will be used to fetch the value.
-        LookupIterator it = *lookup;
-        it.Next();
-        LookupForRead(&it);
-        if (it.state() == LookupIterator::ACCESSOR &&
-            !IsCompatibleReceiver(&it, receiver_map())) {
-          TRACE_GENERIC_IC(isolate(), "LoadIC", "incompatible receiver type");
-          code = slow_stub();
-        }
       }
     }
     if (code.is_null()) code = ComputeHandler(lookup);
@@ -1134,20 +1137,12 @@
 }
 
 StubCache* IC::stub_cache() {
-  switch (kind()) {
-    case Code::LOAD_IC:
-    case Code::KEYED_LOAD_IC:
-      return isolate()->load_stub_cache();
-
-    case Code::STORE_IC:
-    case Code::KEYED_STORE_IC:
-      return isolate()->store_stub_cache();
-
-    default:
-      break;
+  if (IsAnyLoad()) {
+    return isolate()->load_stub_cache();
+  } else {
+    DCHECK(IsAnyStore());
+    return isolate()->store_stub_cache();
   }
-  UNREACHABLE();
-  return nullptr;
 }
 
 void IC::UpdateMegamorphicCache(Map* map, Name* name, Object* handler) {
@@ -1157,8 +1152,7 @@
 void IC::TraceHandlerCacheHitStats(LookupIterator* lookup) {
   if (!FLAG_runtime_call_stats) return;
 
-  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
-      kind() == Code::KEYED_LOAD_IC) {
+  if (IsAnyLoad()) {
     switch (lookup->state()) {
       case LookupIterator::ACCESS_CHECK:
         TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_AccessCheck);
@@ -1185,7 +1179,7 @@
         TRACE_HANDLER_STATS(isolate(), LoadIC_HandlerCacheHit_Transition);
         break;
     }
-  } else if (kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC) {
+  } else if (IsAnyStore()) {
     switch (lookup->state()) {
       case LookupIterator::ACCESS_CHECK:
         TRACE_HANDLER_STATS(isolate(), StoreIC_HandlerCacheHit_AccessCheck);
@@ -1232,19 +1226,18 @@
       lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
   CacheHolderFlag flag;
   Handle<Map> stub_holder_map;
-  if (kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC ||
-      kind() == Code::KEYED_LOAD_IC) {
+  if (IsAnyLoad()) {
     stub_holder_map = IC::GetHandlerCacheHolder(
         receiver_map(), receiver_is_holder, isolate(), &flag);
   } else {
-    DCHECK(kind() == Code::STORE_IC || kind() == Code::KEYED_STORE_IC);
+    DCHECK(IsAnyStore());
     // Store handlers cannot be cached on prototypes.
     flag = kCacheOnReceiver;
     stub_holder_map = receiver_map();
   }
 
   Handle<Object> handler = PropertyHandlerCompiler::Find(
-      lookup->name(), stub_holder_map, kind(), flag);
+      lookup->name(), stub_holder_map, handler_kind(), flag);
   // Use the cached value if it exists, and if it is different from the
   // handler that just missed.
   if (!handler.is_null()) {
@@ -1288,7 +1281,7 @@
   if (receiver->IsString() &&
       Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
     FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
-    return SimpleFieldLoad(index);
+    return SimpleFieldLoad(isolate(), index);
   }
 
   if (receiver->IsStringWrapper() &&
@@ -1307,8 +1300,7 @@
            ->has_non_instance_prototype()) {
     Handle<Code> stub;
     TRACE_HANDLER_STATS(isolate(), LoadIC_FunctionPrototypeStub);
-    FunctionPrototypeStub function_prototype_stub(isolate());
-    return function_prototype_stub.GetCode();
+    return isolate()->builtins()->LoadIC_FunctionPrototype();
   }
 
   Handle<Map> map = receiver_map();
@@ -1326,7 +1318,7 @@
       if (Accessors::IsJSObjectFieldAccessor(map, lookup->name(),
                                              &object_offset)) {
         FieldIndex index = FieldIndex::ForInObjectOffset(object_offset, *map);
-        return SimpleFieldLoad(index);
+        return SimpleFieldLoad(isolate(), index);
       }
 
       if (IsCompatibleReceiver(lookup, map)) {
@@ -1337,7 +1329,7 @@
             return slow_stub();
           }
           // When debugging we need to go the slow path to flood the accessor.
-          if (GetSharedFunctionInfo()->HasDebugInfo()) {
+          if (GetHostFunction()->shared()->HasDebugInfo()) {
             TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
             return slow_stub();
           }
@@ -1356,26 +1348,15 @@
             TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
             return slow_stub();
           }
-          if (FLAG_tf_load_ic_stub) {
-            Handle<Object> smi_handler = LoadHandler::LoadApiGetter(
-                isolate(), lookup->GetAccessorIndex());
-            if (receiver_is_holder) {
-              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
-              return smi_handler;
-            }
-            if (kind() != Code::LOAD_GLOBAL_IC) {
-              TRACE_HANDLER_STATS(isolate(),
-                                  LoadIC_LoadApiGetterFromPrototypeDH);
-              return LoadFromPrototype(map, holder, lookup->name(),
-                                       smi_handler);
-            }
-          } else {
-            if (receiver_is_holder) {
-              TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterStub);
-              int index = lookup->GetAccessorIndex();
-              LoadApiGetterStub stub(isolate(), true, index);
-              return stub.GetCode();
-            }
+          Handle<Object> smi_handler =
+              LoadHandler::LoadApiGetter(isolate(), lookup->GetAccessorIndex());
+          if (receiver_is_holder) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterDH);
+            return smi_handler;
+          }
+          if (!IsLoadGlobalIC()) {
+            TRACE_HANDLER_STATS(isolate(), LoadIC_LoadApiGetterFromPrototypeDH);
+            return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
           }
           break;  // Custom-compiled handler.
         }
@@ -1385,8 +1366,9 @@
     }
 
     case LookupIterator::DATA: {
+      DCHECK_EQ(kData, lookup->property_details().kind());
       if (lookup->is_dictionary_holder()) {
-        if (kind() != Code::LOAD_IC && kind() != Code::LOAD_GLOBAL_IC) {
+        if (!IsLoadIC() && !IsLoadGlobalIC()) {  // IsKeyedLoadIC()?
           TRACE_HANDLER_STATS(isolate(), LoadIC_SlowStub);
           return slow_stub();
         }
@@ -1406,40 +1388,26 @@
       }
 
       // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
+      if (lookup->property_details().location() == kField) {
         FieldIndex field = lookup->GetFieldIndex();
-        Handle<Object> smi_handler = SimpleFieldLoad(field);
+        Handle<Object> smi_handler = SimpleFieldLoad(isolate(), field);
         if (receiver_is_holder) {
           return smi_handler;
         }
-        if (FLAG_tf_load_ic_stub && kind() != Code::LOAD_GLOBAL_IC) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
-          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
-        }
-        break;  // Custom-compiled handler.
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadFieldFromPrototypeDH);
+        return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
       }
 
       // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      if (FLAG_tf_load_ic_stub) {
-        Handle<Object> smi_handler =
-            LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
-        if (receiver_is_holder) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
-          return smi_handler;
-        }
-        if (kind() != Code::LOAD_GLOBAL_IC) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
-          return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
-        }
-      } else {
-        if (receiver_is_holder) {
-          TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantStub);
-          LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
-          return stub.GetCode();
-        }
+      DCHECK_EQ(kDescriptor, lookup->property_details().location());
+      Handle<Object> smi_handler =
+          LoadHandler::LoadConstant(isolate(), lookup->GetConstantIndex());
+      if (receiver_is_holder) {
+        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantDH);
+        return smi_handler;
       }
-      break;  // Custom-compiled handler.
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstantFromPrototypeDH);
+      return LoadFromPrototype(map, holder, lookup->name(), smi_handler);
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1507,7 +1475,7 @@
           return ComputeHandler(lookup);
         }
         DCHECK(holder->HasFastProperties());
-        DCHECK(!GetSharedFunctionInfo()->HasDebugInfo());
+        DCHECK(!GetHostFunction()->shared()->HasDebugInfo());
         Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
                               isolate());
         CallOptimization call_optimization(getter);
@@ -1543,33 +1511,15 @@
     }
 
     case LookupIterator::DATA: {
-      if (lookup->is_dictionary_holder()) {
-        DCHECK(kind() == Code::LOAD_IC || kind() == Code::LOAD_GLOBAL_IC);
-        DCHECK(holder->IsJSGlobalObject());
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        Handle<PropertyCell> cell = lookup->GetPropertyCell();
-        Handle<Code> code = compiler.CompileLoadGlobal(
-            cell, lookup->name(), lookup->IsConfigurable());
-        return code;
-      }
-
-      // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        FieldIndex field = lookup->GetFieldIndex();
-        DCHECK(!receiver_is_holder);
-        TRACE_HANDLER_STATS(isolate(), LoadIC_LoadField);
-        NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-        return compiler.CompileLoadField(lookup->name(), field);
-      }
-
-      // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      DCHECK(!receiver_is_holder);
-      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadConstant);
+      DCHECK(lookup->is_dictionary_holder());
+      DCHECK(IsLoadIC() || IsLoadGlobalIC());
+      DCHECK(holder->IsJSGlobalObject());
+      TRACE_HANDLER_STATS(isolate(), LoadIC_LoadGlobal);
       NamedLoadHandlerCompiler compiler(isolate(), map, holder, cache_holder);
-      return compiler.CompileLoadConstant(lookup->name(),
-                                          lookup->GetConstantIndex());
+      Handle<PropertyCell> cell = lookup->GetPropertyCell();
+      Handle<Code> code = compiler.CompileLoadGlobal(cell, lookup->name(),
+                                                     lookup->IsConfigurable());
+      return code;
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -1599,6 +1549,8 @@
     }
   } else if (key->IsUndefined(isolate)) {
     key = isolate->factory()->undefined_string();
+  } else if (key->IsString()) {
+    key = isolate->factory()->InternalizeString(Handle<String>::cast(key));
   }
   return key;
 }
@@ -1620,11 +1572,11 @@
     Handle<Map> map = target_receiver_maps.at(i);
     if (map.is_null()) continue;
     if (map->instance_type() == JS_VALUE_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSValue");
+      TRACE_GENERIC_IC("JSValue");
       return;
     }
     if (map->instance_type() == JS_PROXY_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "JSProxy");
+      TRACE_GENERIC_IC("JSProxy");
       return;
     }
   }
@@ -1652,14 +1604,14 @@
   if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+    TRACE_GENERIC_IC("same map added twice");
     return;
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+    TRACE_GENERIC_IC("max polymorph exceeded");
     return;
   }
 
@@ -1706,7 +1658,6 @@
 
   if (!is_vector_set()) {
     ConfigureVectorState(MEGAMORPHIC, key);
-    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
     TRACE_IC("LoadIC", key);
   }
 
@@ -1839,7 +1790,14 @@
 
   // If the object is undefined or null it's illegal to try to set any
   // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined(isolate()) || object->IsNull(isolate())) {
+  if (object->IsNullOrUndefined(isolate())) {
+    if (FLAG_use_ic && state() != UNINITIALIZED && state() != PREMONOMORPHIC) {
+      // Ensure the IC state progresses.
+      TRACE_HANDLER_STATS(isolate(), StoreIC_NonReceiver);
+      update_receiver_map(object);
+      PatchCache(name, slow_stub());
+      TRACE_IC("StoreIC", name);
+    }
     return TypeError(MessageTemplate::kNonObjectPropertyStore, object, name);
   }
 
@@ -1865,12 +1823,13 @@
     return;
   }
 
-  bool use_ic = LookupForWrite(lookup, value, store_mode);
-  if (!use_ic) {
-    TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+  Handle<Object> handler;
+  if (LookupForWrite(lookup, value, store_mode)) {
+    handler = ComputeHandler(lookup, value);
+  } else {
+    TRACE_GENERIC_IC("LookupForWrite said 'false'");
+    handler = slow_stub();
   }
-  Handle<Object> handler = use_ic ? ComputeHandler(lookup, value)
-                                  : Handle<Object>::cast(slow_stub());
 
   PatchCache(lookup->name(), handler);
   TRACE_IC("StoreIC", lookup->name());
@@ -1890,11 +1849,12 @@
   DCHECK(!transition->is_access_check_needed());
 
   Handle<Object> smi_handler;
-  if (details.type() == DATA_CONSTANT) {
+  DCHECK_EQ(kData, details.kind());
+  if (details.location() == kDescriptor) {
     smi_handler = StoreHandler::TransitionToConstant(isolate(), descriptor);
 
   } else {
-    DCHECK_EQ(DATA, details.type());
+    DCHECK_EQ(kField, details.location());
     bool extend_storage =
         Map::cast(transition->GetBackPointer())->unused_property_fields() == 0;
 
@@ -1967,18 +1927,15 @@
       }
       // Currently not handled by CompileStoreTransition.
       if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+        TRACE_GENERIC_IC("transition from slow");
         TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
         return slow_stub();
       }
       DCHECK(lookup->IsCacheableTransition());
-      if (FLAG_tf_store_ic_stub) {
-        Handle<Map> transition = lookup->transition_map();
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
-        return StoreTransition(receiver_map(), holder, transition,
-                               lookup->name());
-      }
-      break;  // Custom-compiled handler.
+      Handle<Map> transition = lookup->transition_map();
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransitionDH);
+      return StoreTransition(receiver_map(), holder, transition,
+                             lookup->name());
     }
 
     case LookupIterator::INTERCEPTOR: {
@@ -1990,7 +1947,7 @@
 
     case LookupIterator::ACCESSOR: {
       if (!holder->HasFastProperties()) {
-        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+        TRACE_GENERIC_IC("accessor on slow map");
         TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
         return slow_stub();
       }
@@ -1998,20 +1955,19 @@
       if (accessors->IsAccessorInfo()) {
         Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(accessors);
         if (v8::ToCData<Address>(info->setter()) == nullptr) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == nullptr");
+          TRACE_GENERIC_IC("setter == nullptr");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
         if (AccessorInfo::cast(*accessors)->is_special_data_property() &&
             !lookup->HolderIsReceiverOrHiddenPrototype()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC",
-                           "special data property in prototype chain");
+          TRACE_GENERIC_IC("special data property in prototype chain");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
         if (!AccessorInfo::IsCompatibleReceiverMap(isolate(), info,
                                                    receiver_map())) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+          TRACE_GENERIC_IC("incompatible receiver type");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2024,7 +1980,7 @@
         Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
                               isolate());
         if (!setter->IsJSFunction() && !setter->IsFunctionTemplateInfo()) {
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+          TRACE_GENERIC_IC("setter not a function");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2033,7 +1989,7 @@
           if (call_optimization.IsCompatibleReceiver(receiver, holder)) {
             break;  // Custom-compiled handler.
           }
-          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver");
+          TRACE_GENERIC_IC("incompatible receiver");
           TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
           return slow_stub();
         }
@@ -2044,6 +2000,7 @@
     }
 
     case LookupIterator::DATA: {
+      DCHECK_EQ(kData, lookup->property_details().kind());
       if (lookup->is_dictionary_holder()) {
         if (holder->IsJSGlobalObject()) {
           break;  // Custom-compiled handler.
@@ -2054,33 +2011,18 @@
       }
 
       // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        if (FLAG_tf_store_ic_stub) {
-          TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
-          int descriptor = lookup->GetFieldDescriptorIndex();
-          FieldIndex index = lookup->GetFieldIndex();
-          return StoreHandler::StoreField(isolate(), descriptor, index,
-                                          lookup->representation());
-        } else {
-          bool use_stub = true;
-          if (lookup->representation().IsHeapObject()) {
-            // Only use a generic stub if no types need to be tracked.
-            Handle<FieldType> field_type = lookup->GetFieldType();
-            use_stub = !field_type->IsClass();
-          }
-          if (use_stub) {
-            TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldStub);
-            StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
-                                lookup->representation());
-            return stub.GetCode();
-          }
-        }
-        break;  // Custom-compiled handler.
+      if (lookup->property_details().location() == kField) {
+        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreFieldDH);
+        int descriptor = lookup->GetFieldDescriptorIndex();
+        FieldIndex index = lookup->GetFieldIndex();
+        return StoreHandler::StoreField(isolate(), descriptor, index,
+                                        lookup->constness(),
+                                        lookup->representation());
       }
 
       // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+      DCHECK_EQ(kDescriptor, lookup->property_details().location());
+      TRACE_GENERIC_IC("constant property");
       TRACE_HANDLER_STATS(isolate(), StoreIC_SlowStub);
       return slow_stub();
     }
@@ -2117,15 +2059,7 @@
         cell->set_value(isolate()->heap()->the_hole_value());
         return code;
       }
-      DCHECK(!FLAG_tf_store_ic_stub);
-      Handle<Map> transition = lookup->transition_map();
-      // Currently not handled by CompileStoreTransition.
-      DCHECK(holder->HasFastProperties());
-
-      DCHECK(lookup->IsCacheableTransition());
-      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreTransition);
-      NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-      return compiler.CompileStoreTransition(transition, lookup->name());
+      UNREACHABLE();
     }
 
     case LookupIterator::INTERCEPTOR:
@@ -2144,6 +2078,11 @@
         DCHECK(!info->is_sloppy() || receiver->IsJSReceiver());
         TRACE_HANDLER_STATS(isolate(), StoreIC_StoreCallback);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
+        // TODO(ishell): don't hard-code language mode into the handler because
+        // this handler can be re-used through megamorphic stub cache for wrong
+        // language mode.
+        // Better pass vector/slot to Runtime::kStoreCallbackProperty and
+        // let it decode the language mode from the IC kind.
         Handle<Code> code = compiler.CompileStoreCallback(
             receiver, lookup->name(), info, language_mode());
         return code;
@@ -2173,40 +2112,18 @@
     }
 
     case LookupIterator::DATA: {
-      if (lookup->is_dictionary_holder()) {
-        DCHECK(holder->IsJSGlobalObject());
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
-        DCHECK(holder.is_identical_to(receiver) ||
-               receiver->map()->prototype() == *holder);
-        auto cell = lookup->GetPropertyCell();
-        auto updated_type =
-            PropertyCell::UpdatedType(cell, value, lookup->property_details());
-        auto code = PropertyCellStoreHandler(
-            isolate(), receiver, Handle<JSGlobalObject>::cast(holder),
-            lookup->name(), cell, updated_type);
-        return code;
-      }
-
-      // -------------- Fields --------------
-      if (lookup->property_details().type() == DATA) {
-        DCHECK(!FLAG_tf_store_ic_stub);
-#ifdef DEBUG
-        bool use_stub = true;
-        if (lookup->representation().IsHeapObject()) {
-          // Only use a generic stub if no types need to be tracked.
-          Handle<FieldType> field_type = lookup->GetFieldType();
-          use_stub = !field_type->IsClass();
-        }
-        DCHECK(!use_stub);
-#endif
-        TRACE_HANDLER_STATS(isolate(), StoreIC_StoreField);
-        NamedStoreHandlerCompiler compiler(isolate(), receiver_map(), holder);
-        return compiler.CompileStoreField(lookup);
-      }
-
-      // -------------- Constant properties --------------
-      DCHECK(lookup->property_details().type() == DATA_CONSTANT);
-      UNREACHABLE();
+      DCHECK(lookup->is_dictionary_holder());
+      DCHECK(holder->IsJSGlobalObject());
+      TRACE_HANDLER_STATS(isolate(), StoreIC_StoreGlobal);
+      DCHECK(holder.is_identical_to(receiver) ||
+             receiver->map()->prototype() == *holder);
+      auto cell = lookup->GetPropertyCell();
+      auto updated_type =
+          PropertyCell::UpdatedType(cell, value, lookup->property_details());
+      auto code = PropertyCellStoreHandler(isolate(), receiver,
+                                           Handle<JSGlobalObject>::cast(holder),
+                                           lookup->name(), cell, updated_type);
+      return code;
     }
 
     case LookupIterator::INTEGER_INDEXED_EXOTIC:
@@ -2227,16 +2144,14 @@
     Handle<Map> monomorphic_map =
         ComputeTransitionedMap(receiver_map, store_mode);
     store_mode = GetNonTransitioningStoreMode(store_mode);
-    Handle<Code> handler =
-        PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(monomorphic_map,
-                                                                store_mode);
+    Handle<Object> handler = StoreElementHandler(monomorphic_map, store_mode);
     return ConfigureVectorState(Handle<Name>(), monomorphic_map, handler);
   }
 
   for (int i = 0; i < target_receiver_maps.length(); i++) {
     if (!target_receiver_maps.at(i).is_null() &&
         target_receiver_maps.at(i)->instance_type() == JS_VALUE_TYPE) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "JSValue");
+      TRACE_GENERIC_IC("JSValue");
       return;
     }
   }
@@ -2261,9 +2176,8 @@
       // if they at least come from the same origin for a transitioning store,
       // stay MONOMORPHIC and use the map for the most generic ElementsKind.
       store_mode = GetNonTransitioningStoreMode(store_mode);
-      Handle<Code> handler =
-          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(
-              transitioned_receiver_map, store_mode);
+      Handle<Object> handler =
+          StoreElementHandler(transitioned_receiver_map, store_mode);
       ConfigureVectorState(Handle<Name>(), transitioned_receiver_map, handler);
       return;
     }
@@ -2275,9 +2189,7 @@
       // A "normal" IC that handles stores can switch to a version that can
       // grow at the end of the array, handle OOB accesses or copy COW arrays
       // and still stay MONOMORPHIC.
-      Handle<Code> handler =
-          PropertyICCompiler::ComputeKeyedStoreMonomorphicHandler(receiver_map,
-                                                                  store_mode);
+      Handle<Object> handler = StoreElementHandler(receiver_map, store_mode);
       return ConfigureVectorState(Handle<Name>(), receiver_map, handler);
     }
   }
@@ -2297,7 +2209,7 @@
   if (!map_added) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the megamorphic stub which can handle everything.
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
+    TRACE_GENERIC_IC("same map added twice");
     return;
   }
 
@@ -2312,7 +2224,7 @@
     if (store_mode == STANDARD_STORE) {
       store_mode = old_store_mode;
     } else if (store_mode != old_store_mode) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
+      TRACE_GENERIC_IC("store mode mismatch");
       return;
     }
   }
@@ -2329,16 +2241,15 @@
     }
     if (external_arrays != 0 &&
         external_arrays != target_receiver_maps.length()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                       "unsupported combination of external and normal arrays");
+      TRACE_GENERIC_IC("unsupported combination of external and normal arrays");
       return;
     }
   }
 
   MapHandleList transitioned_maps(target_receiver_maps.length());
-  CodeHandleList handlers(target_receiver_maps.length());
-  PropertyICCompiler::ComputeKeyedStorePolymorphicHandlers(
-      &target_receiver_maps, &transitioned_maps, &handlers, store_mode);
+  List<Handle<Object>> handlers(target_receiver_maps.length());
+  StoreElementPolymorphicHandlers(&target_receiver_maps, &transitioned_maps,
+                                  &handlers, store_mode);
   ConfigureVectorState(&target_receiver_maps, &transitioned_maps, &handlers);
 }
 
@@ -2372,6 +2283,91 @@
   return MaybeHandle<Map>().ToHandleChecked();
 }
 
+Handle<Object> KeyedStoreIC::StoreElementHandler(
+    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  Handle<Code> stub;
+  if (receiver_map->has_sloppy_arguments_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_KeyedStoreSloppyArgumentsStub);
+    stub = KeyedStoreSloppyArgumentsStub(isolate(), store_mode).GetCode();
+  } else if (receiver_map->has_fast_elements() ||
+             receiver_map->has_fixed_typed_array_elements()) {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreFastElementStub);
+    stub =
+        StoreFastElementStub(isolate(), is_jsarray, elements_kind, store_mode)
+            .GetCode();
+  } else {
+    TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_StoreElementStub);
+    DCHECK_EQ(DICTIONARY_ELEMENTS, elements_kind);
+    stub = StoreSlowElementStub(isolate(), store_mode).GetCode();
+  }
+  Handle<Object> validity_cell =
+      Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+  if (validity_cell.is_null()) {
+    return stub;
+  }
+  return isolate()->factory()->NewTuple2(validity_cell, stub);
+}
+
+void KeyedStoreIC::StoreElementPolymorphicHandlers(
+    MapHandleList* receiver_maps, MapHandleList* transitioned_maps,
+    List<Handle<Object>>* handlers, KeyedAccessStoreMode store_mode) {
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map(receiver_maps->at(i));
+    Handle<Object> handler;
+    Handle<Map> transitioned_map;
+    {
+      Map* tmap = receiver_map->FindElementsKindTransitionedMap(receiver_maps);
+      if (tmap != nullptr) transitioned_map = handle(tmap);
+    }
+
+    // TODO(mvstanton): The code below is doing pessimistic elements
+    // transitions. I would like to stop doing that and rely on Allocation Site
+    // Tracking to do a better job of ensuring the data types are what they need
+    // to be. Not all the elements are in place yet, pessimistic elements
+    // transitions are still important for performance.
+    if (!transitioned_map.is_null()) {
+      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+      TRACE_HANDLER_STATS(isolate(),
+                          KeyedStoreIC_ElementsTransitionAndStoreStub);
+      Handle<Code> stub =
+          ElementsTransitionAndStoreStub(isolate(), elements_kind,
+                                         transitioned_map->elements_kind(),
+                                         is_js_array, store_mode)
+              .GetCode();
+      Handle<Object> validity_cell =
+          Map::GetOrCreatePrototypeChainValidityCell(receiver_map, isolate());
+      if (validity_cell.is_null()) {
+        handler = stub;
+      } else {
+        handler = isolate()->factory()->NewTuple2(validity_cell, stub);
+      }
+
+    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+      // TODO(mvstanton): Consider embedding store_mode in the state of the slow
+      // keyed store ic for uniformity.
+      TRACE_HANDLER_STATS(isolate(), KeyedStoreIC_SlowStub);
+      handler = isolate()->builtins()->KeyedStoreIC_Slow();
+    } else {
+      handler = StoreElementHandler(receiver_map, store_mode);
+    }
+    DCHECK(!handler.is_null());
+    handlers->Add(handler);
+    transitioned_maps->Add(transitioned_map);
+  }
+}
 
 bool IsOutOfBoundsAccess(Handle<JSObject> receiver, uint32_t index) {
   uint32_t length = 0;
@@ -2464,8 +2460,7 @@
         Object);
     if (!is_vector_set()) {
       ConfigureVectorState(MEGAMORPHIC, key);
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                       "unhandled internalized string key");
+      TRACE_GENERIC_IC("unhandled internalized string key");
       TRACE_IC("StoreIC", key);
     }
     return store_handle;
@@ -2479,23 +2474,20 @@
     // the runtime to enable optimization of element hole access.
     Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
     if (heap_object->map()->IsMapInArrayPrototypeChain()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+      TRACE_GENERIC_IC("map in array prototype");
       use_ic = false;
     }
   }
 
   Handle<Map> old_receiver_map;
-  bool sloppy_arguments_elements = false;
+  bool is_arguments = false;
   bool key_is_valid_index = false;
   KeyedAccessStoreMode store_mode = STANDARD_STORE;
   if (use_ic && object->IsJSObject()) {
     Handle<JSObject> receiver = Handle<JSObject>::cast(object);
     old_receiver_map = handle(receiver->map(), isolate());
-    sloppy_arguments_elements =
-        !is_sloppy(language_mode()) &&
-        receiver->elements()->map() ==
-            isolate()->heap()->sloppy_arguments_elements_map();
-    if (!sloppy_arguments_elements) {
+    is_arguments = receiver->IsJSArgumentsObject();
+    if (!is_arguments) {
       key_is_valid_index = key->IsSmi() && Smi::cast(*key)->value() >= 0;
       if (key_is_valid_index) {
         uint32_t index = static_cast<uint32_t>(Smi::cast(*key)->value());
@@ -2512,8 +2504,8 @@
 
   if (use_ic) {
     if (!old_receiver_map.is_null()) {
-      if (sloppy_arguments_elements) {
-        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
+      if (is_arguments) {
+        TRACE_GENERIC_IC("arguments receiver");
       } else if (key_is_valid_index) {
         // We should go generic if receiver isn't a dictionary, but our
         // prototype chain does have dictionary elements. This ensures that
@@ -2522,20 +2514,18 @@
         if (!old_receiver_map->DictionaryElementsInPrototypeChainOnly()) {
           UpdateStoreElement(old_receiver_map, store_mode);
         } else {
-          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
-                           "dictionary or proxy prototype");
+          TRACE_GENERIC_IC("dictionary or proxy prototype");
         }
       } else {
-        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
+        TRACE_GENERIC_IC("non-smi-like key");
       }
     } else {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
+      TRACE_GENERIC_IC("non-JSObject receiver");
     }
   }
 
   if (!is_vector_set()) {
     ConfigureVectorState(MEGAMORPHIC, key);
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
   }
   TRACE_IC("StoreIC", key);
 
@@ -2543,48 +2533,6 @@
 }
 
 
-void CallIC::HandleMiss(Handle<Object> function) {
-  Handle<Object> name = isolate()->factory()->empty_string();
-  CallICNexus* nexus = casted_nexus<CallICNexus>();
-  Object* feedback = nexus->GetFeedback();
-
-  // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
-  DCHECK(!feedback->IsSmi());
-
-  if (feedback->IsWeakCell() || !function->IsJSFunction() ||
-      feedback->IsAllocationSite()) {
-    // We are going generic.
-    nexus->ConfigureMegamorphic();
-  } else {
-    DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-
-    Handle<JSFunction> array_function =
-        Handle<JSFunction>(isolate()->native_context()->array_function());
-    if (array_function.is_identical_to(js_function)) {
-      // Alter the slot.
-      nexus->ConfigureMonomorphicArray();
-    } else if (js_function->context()->native_context() !=
-               *isolate()->native_context()) {
-      // Don't collect cross-native context feedback for the CallIC.
-      // TODO(bmeurer): We should collect the SharedFunctionInfo as
-      // feedback in this case instead.
-      nexus->ConfigureMegamorphic();
-    } else {
-      nexus->ConfigureMonomorphic(js_function);
-    }
-  }
-
-  if (function->IsJSFunction()) {
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-    name = handle(js_function->shared()->name(), isolate());
-  }
-
-  OnTypeFeedbackChanged(isolate(), get_host());
-  TRACE_IC("CallIC", name);
-}
-
-
 #undef TRACE_IC
 
 
@@ -2593,55 +2541,36 @@
 //
 
 // Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(Runtime_CallIC_Miss) {
-  HandleScope scope(isolate);
-  DCHECK_EQ(3, args.length());
-  // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> function = args.at<Object>(0);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
-  Handle<Smi> slot = args.at<Smi>(2);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  CallICNexus nexus(vector, vector_slot);
-  CallIC ic(isolate, &nexus);
-  ic.HandleMiss(function);
-  return *function;
-}
-
-
-// Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> receiver = args.at(0);
+  Handle<Name> key = args.at<Name>(1);
   Handle<Smi> slot = args.at<Smi>(2);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
   // LoadIC miss handler if the handler misses. Since the vector Nexus is
   // set up outside the IC, handle that here.
-  FeedbackVectorSlotKind kind = vector->GetKind(vector_slot);
-  if (kind == FeedbackVectorSlotKind::LOAD_IC) {
-    Handle<Name> key = args.at<Name>(1);
+  FeedbackSlotKind kind = vector->GetKind(vector_slot);
+  if (IsLoadICKind(kind)) {
     LoadICNexus nexus(vector, vector_slot);
-    LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    LoadIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 
-  } else if (kind == FeedbackVectorSlotKind::LOAD_GLOBAL_IC) {
-    Handle<Name> key(vector->GetName(vector_slot), isolate);
-    DCHECK_NE(*key, isolate->heap()->empty_string());
+  } else if (IsLoadGlobalICKind(kind)) {
     DCHECK_EQ(*isolate->global_object(), *receiver);
     LoadGlobalICNexus nexus(vector, vector_slot);
-    LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    LoadGlobalIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(key));
 
   } else {
-    Handle<Name> key = args.at<Name>(1);
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_LOAD_IC, kind);
+    DCHECK(IsKeyedLoadICKind(kind));
     KeyedLoadICNexus nexus(vector, vector_slot);
-    KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    KeyedLoadIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
   }
@@ -2650,19 +2579,16 @@
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Miss) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
+  DCHECK_EQ(3, args.length());
   // Runtime functions don't follow the IC's calling convention.
   Handle<JSGlobalObject> global = isolate->global_object();
-  Handle<Smi> slot = args.at<Smi>(0);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(1);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            vector->GetKind(vector_slot));
-  Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, isolate->heap()->empty_string());
+  Handle<String> name = args.at<String>(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
 
   LoadGlobalICNexus nexus(vector, vector_slot);
-  LoadGlobalIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  LoadGlobalIC ic(isolate, &nexus);
   ic.UpdateState(global, name);
 
   Handle<Object> result;
@@ -2672,20 +2598,12 @@
 
 RUNTIME_FUNCTION(Runtime_LoadGlobalIC_Slow) {
   HandleScope scope(isolate);
-  DCHECK_EQ(2, args.length());
-  CONVERT_SMI_ARG_CHECKED(slot, 0);
-  CONVERT_ARG_HANDLE_CHECKED(TypeFeedbackVector, vector, 1);
+  DCHECK_EQ(3, args.length());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
 
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot);
-  DCHECK_EQ(FeedbackVectorSlotKind::LOAD_GLOBAL_IC,
-            vector->GetKind(vector_slot));
-  Handle<String> name(vector->GetName(vector_slot), isolate);
-  DCHECK_NE(*name, isolate->heap()->empty_string());
-
-  Handle<JSGlobalObject> global = isolate->global_object();
-
+  Handle<Context> native_context = isolate->native_context();
   Handle<ScriptContextTable> script_contexts(
-      global->native_context()->script_context_table());
+      native_context->script_context_table());
 
   ScriptContextTable::LookupResult lookup_result;
   if (ScriptContextTable::Lookup(script_contexts, name, &lookup_result)) {
@@ -2700,17 +2618,20 @@
     return *result;
   }
 
+  Handle<JSGlobalObject> global(native_context->global_object(), isolate);
   Handle<Object> result;
   bool is_found = false;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
       Runtime::GetObjectProperty(isolate, global, name, &is_found));
   if (!is_found) {
-    LoadICNexus nexus(isolate);
-    LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    Handle<Smi> slot = args.at<Smi>(1);
+    Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+    FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+    FeedbackSlotKind kind = vector->GetKind(vector_slot);
     // It is actually a LoadGlobalICs here but the predicate handles this case
     // properly.
-    if (ic.ShouldThrowReferenceError()) {
+    if (LoadIC::ShouldThrowReferenceError(kind)) {
       THROW_NEW_ERROR_RETURN_FAILURE(
           isolate, NewReferenceError(MessageTemplate::kNotDefined, name));
     }
@@ -2723,75 +2644,56 @@
   HandleScope scope(isolate);
   DCHECK_EQ(4, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> receiver = args.at(0);
+  Handle<Object> key = args.at(1);
   Handle<Smi> slot = args.at<Smi>(2);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(3);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   KeyedLoadICNexus nexus(vector, vector_slot);
-  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  KeyedLoadIC ic(isolate, &nexus);
   ic.UpdateState(receiver, key);
   RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
 }
 
-
-RUNTIME_FUNCTION(Runtime_KeyedLoadIC_MissFromStubFailure) {
-  HandleScope scope(isolate);
-  typedef LoadWithVectorDescriptor Descriptor;
-  DCHECK_EQ(Descriptor::kParameterCount, args.length());
-  Handle<Object> receiver = args.at<Object>(Descriptor::kReceiver);
-  Handle<Object> key = args.at<Object>(Descriptor::kName);
-  Handle<Smi> slot = args.at<Smi>(Descriptor::kSlot);
-  Handle<TypeFeedbackVector> vector =
-      args.at<TypeFeedbackVector>(Descriptor::kVector);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  KeyedLoadICNexus nexus(vector, vector_slot);
-  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
-  ic.UpdateState(receiver, key);
-  RETURN_RESULT_OR_FAILURE(isolate, ic.Load(receiver, key));
-}
-
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_StoreIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Smi> slot = args.at<Smi>(1);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
-  Handle<Object> receiver = args.at<Object>(3);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> receiver = args.at(3);
   Handle<Name> key = args.at<Name>(4);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
-  if (vector->GetKind(vector_slot) == FeedbackVectorSlotKind::STORE_IC) {
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  FeedbackSlotKind kind = vector->GetKind(vector_slot);
+  if (IsStoreICKind(kind) || IsStoreOwnICKind(kind)) {
     StoreICNexus nexus(vector, vector_slot);
-    StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    StoreIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   } else {
-    DCHECK_EQ(FeedbackVectorSlotKind::KEYED_STORE_IC,
-              vector->GetKind(vector_slot));
+    DCHECK(IsKeyedStoreICKind(kind));
     KeyedStoreICNexus nexus(vector, vector_slot);
-    KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    KeyedStoreIC ic(isolate, &nexus);
     ic.UpdateState(receiver, key);
     RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
   }
 }
 
-
 // Used from ic-<arch>.cc.
 RUNTIME_FUNCTION(Runtime_KeyedStoreIC_Miss) {
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
+  Handle<Object> value = args.at(0);
   Handle<Smi> slot = args.at<Smi>(1);
-  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
-  Handle<Object> receiver = args.at<Object>(3);
-  Handle<Object> key = args.at<Object>(4);
-  FeedbackVectorSlot vector_slot = vector->ToSlot(slot->value());
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> receiver = args.at(3);
+  Handle<Object> key = args.at(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
   KeyedStoreICNexus nexus(vector, vector_slot);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+  KeyedStoreIC ic(isolate, &nexus);
   ic.UpdateState(receiver, key);
   RETURN_RESULT_OR_FAILURE(isolate, ic.Store(receiver, key, value));
 }
@@ -2801,14 +2703,13 @@
   HandleScope scope(isolate);
   DCHECK_EQ(5, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> value = args.at<Object>(0);
-  // slot and vector parameters are not used.
-  Handle<Object> object = args.at<Object>(3);
-  Handle<Object> key = args.at<Object>(4);
-  LanguageMode language_mode;
-  KeyedStoreICNexus nexus(isolate);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  language_mode = ic.language_mode();
+  Handle<Object> value = args.at(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<Object> object = args.at(3);
+  Handle<Object> key = args.at(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
   RETURN_RESULT_OR_FAILURE(
       isolate,
       Runtime::SetObjectProperty(isolate, object, key, value, language_mode));
@@ -2817,15 +2718,16 @@
 
 RUNTIME_FUNCTION(Runtime_ElementsTransitionAndStoreIC_Miss) {
   HandleScope scope(isolate);
+  DCHECK_EQ(6, args.length());
   // Runtime functions don't follow the IC's calling convention.
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
+  Handle<Object> object = args.at(0);
+  Handle<Object> key = args.at(1);
+  Handle<Object> value = args.at(2);
   Handle<Map> map = args.at<Map>(3);
-  LanguageMode language_mode;
-  KeyedStoreICNexus nexus(isolate);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  language_mode = ic.language_mode();
+  Handle<Smi> slot = args.at<Smi>(4);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(5);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
   if (object->IsJSObject()) {
     JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
                                      map->elements_kind());
@@ -2931,15 +2833,25 @@
   }
   set_target(*new_target);
 
-  if (FLAG_trace_ic) {
-    OFStream os(stdout);
-    os << "[BinaryOpIC" << old_state << " => " << state << " @ "
-       << static_cast<void*>(*new_target) << " <- ";
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    if (!allocation_site.is_null()) {
-      os << " using allocation site " << static_cast<void*>(*allocation_site);
-    }
-    os << "]" << std::endl;
+  if (FLAG_ic_stats &
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+    auto ic_stats = ICStats::instance();
+    ic_stats->Begin();
+    ICInfo& ic_info = ic_stats->Current();
+    ic_info.type = "BinaryOpIC";
+    ic_info.state = old_state.ToString();
+    ic_info.state += " => ";
+    ic_info.state += state.ToString();
+    JavaScriptFrame::CollectTopFrameForICStats(isolate());
+    ic_stats->End();
+  } else if (FLAG_ic_stats) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(),
+        BinaryOpIC(pc, line, column, *new_target, old_state.ToString().c_str(),
+                   state.ToString().c_str(),
+                   allocation_site.is_null() ? nullptr : *allocation_site));
   }
 
   // Patch the inlined smi code as necessary.
@@ -2957,8 +2869,8 @@
   HandleScope scope(isolate);
   DCHECK_EQ(2, args.length());
   typedef BinaryOpDescriptor Descriptor;
-  Handle<Object> left = args.at<Object>(Descriptor::kLeft);
-  Handle<Object> right = args.at<Object>(Descriptor::kRight);
+  Handle<Object> left = args.at(Descriptor::kLeft);
+  Handle<Object> right = args.at(Descriptor::kRight);
   BinaryOpIC ic(isolate);
   RETURN_RESULT_OR_FAILURE(
       isolate, ic.Transition(Handle<AllocationSite>::null(), left, right));
@@ -2971,8 +2883,8 @@
   typedef BinaryOpWithAllocationSiteDescriptor Descriptor;
   Handle<AllocationSite> allocation_site =
       args.at<AllocationSite>(Descriptor::kAllocationSite);
-  Handle<Object> left = args.at<Object>(Descriptor::kLeft);
-  Handle<Object> right = args.at<Object>(Descriptor::kRight);
+  Handle<Object> left = args.at(Descriptor::kLeft);
+  Handle<Object> right = args.at(Descriptor::kRight);
   BinaryOpIC ic(isolate);
   RETURN_RESULT_OR_FAILURE(isolate,
                            ic.Transition(allocation_site, left, right));
@@ -3005,17 +2917,40 @@
   Handle<Code> new_target = stub.GetCode();
   set_target(*new_target);
 
-  if (FLAG_trace_ic) {
-    PrintF("[CompareIC in ");
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
-           CompareICState::GetStateName(old_stub.left()),
-           CompareICState::GetStateName(old_stub.right()),
-           CompareICState::GetStateName(old_stub.state()),
-           CompareICState::GetStateName(new_left),
-           CompareICState::GetStateName(new_right),
-           CompareICState::GetStateName(state), Token::Name(op_),
-           static_cast<void*>(*stub.GetCode()));
+  if (FLAG_ic_stats &
+      v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+    auto ic_stats = ICStats::instance();
+    ic_stats->Begin();
+    ICInfo& ic_info = ic_stats->Current();
+    ic_info.type = "CompareIC";
+    JavaScriptFrame::CollectTopFrameForICStats(isolate());
+    ic_info.state = "((";
+    ic_info.state += CompareICState::GetStateName(old_stub.left());
+    ic_info.state += "+";
+    ic_info.state += CompareICState::GetStateName(old_stub.right());
+    ic_info.state += "=";
+    ic_info.state += CompareICState::GetStateName(old_stub.state());
+    ic_info.state += ")->(";
+    ic_info.state += CompareICState::GetStateName(new_left);
+    ic_info.state += "+";
+    ic_info.state += CompareICState::GetStateName(new_right);
+    ic_info.state += "=";
+    ic_info.state += CompareICState::GetStateName(state);
+    ic_info.state += "))#";
+    ic_info.state += Token::Name(op_);
+    ic_stats->End();
+  } else if (FLAG_ic_stats) {
+    int line;
+    int column;
+    Address pc = GetAbstractPC(&line, &column);
+    LOG(isolate(),
+        CompareIC(pc, line, column, *stub.GetCode(), Token::Name(op_),
+                  CompareICState::GetStateName(old_stub.left()),
+                  CompareICState::GetStateName(old_stub.right()),
+                  CompareICState::GetStateName(old_stub.state()),
+                  CompareICState::GetStateName(new_left),
+                  CompareICState::GetStateName(new_right),
+                  CompareICState::GetStateName(state)));
   }
 
   // Activate inlined smi code.
@@ -3032,7 +2967,7 @@
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
   CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
-  return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+  return ic.UpdateCaches(args.at(0), args.at(1));
 }
 
 
@@ -3045,9 +2980,36 @@
 
 Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
   ToBooleanICStub stub(isolate(), extra_ic_state());
+  ToBooleanHints old_hints = stub.hints();
   bool to_boolean_value = stub.UpdateStatus(object);
+  ToBooleanHints new_hints = stub.hints();
   Handle<Code> code = stub.GetCode();
   set_target(*code);
+
+  // Note: Although a no-op transition is semantically OK, it is hinting at a
+  // bug somewhere in our state transition machinery.
+  DCHECK_NE(old_hints, new_hints);
+  if (V8_UNLIKELY(FLAG_ic_stats)) {
+    if (FLAG_ic_stats &
+        v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING) {
+      auto ic_stats = ICStats::instance();
+      ic_stats->Begin();
+      ICInfo& ic_info = ic_stats->Current();
+      ic_info.type = "ToBooleanIC";
+      ic_info.state = ToString(old_hints);
+      ic_info.state += "=>";
+      ic_info.state += ToString(new_hints);
+      ic_stats->End();
+    } else {
+      int line;
+      int column;
+      Address pc = GetAbstractPC(&line, &column);
+      LOG(isolate(),
+          ToBooleanIC(pc, line, column, *code, ToString(old_hints).c_str(),
+                      ToString(new_hints).c_str()));
+    }
+  }
+
   return isolate()->factory()->ToBoolean(to_boolean_value);
 }
 
@@ -3055,7 +3017,7 @@
 RUNTIME_FUNCTION(Runtime_ToBooleanIC_Miss) {
   DCHECK(args.length() == 1);
   HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> object = args.at(0);
   ToBooleanIC ic(isolate);
   return *ic.ToBoolean(object);
 }
@@ -3066,7 +3028,7 @@
   Handle<JSObject> holder = args.at<JSObject>(1);
   Handle<HeapObject> callback_or_cell = args.at<HeapObject>(2);
   Handle<Name> name = args.at<Name>(3);
-  Handle<Object> value = args.at<Object>(4);
+  Handle<Object> value = args.at(4);
   CONVERT_LANGUAGE_MODE_ARG_CHECKED(language_mode, 5);
   HandleScope scope(isolate);
 
@@ -3110,7 +3072,7 @@
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
   Handle<Object> receiver =
-      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+      args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
   HandleScope scope(isolate);
@@ -3142,11 +3104,11 @@
  */
 RUNTIME_FUNCTION(Runtime_LoadPropertyWithInterceptor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength + 2);
   Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
   Handle<Object> receiver =
-      args.at<Object>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+      args.at(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
   Handle<JSObject> holder =
       args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
 
@@ -3181,26 +3143,33 @@
 
   if (it.IsFound()) return *result;
 
-#ifdef DEBUG
-  LoadICNexus nexus(isolate);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  // It could actually be any kind of LoadICs here but the predicate handles
-  // all the cases properly.
-  DCHECK(!ic.ShouldThrowReferenceError());
-#endif
+  Handle<Smi> slot = args.at<Smi>(3);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  FeedbackSlotKind slot_kind = vector->GetKind(vector_slot);
+  // It could actually be any kind of load IC slot here but the predicate
+  // handles all the cases properly.
+  if (!LoadIC::ShouldThrowReferenceError(slot_kind)) {
+    return isolate->heap()->undefined_value();
+  }
 
-  return isolate->heap()->undefined_value();
+  // Throw a reference error.
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError(MessageTemplate::kNotDefined, it.name()));
 }
 
 
 RUNTIME_FUNCTION(Runtime_StorePropertyWithInterceptor) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  StoreICNexus nexus(isolate);
-  StoreIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
-  Handle<JSObject> receiver = args.at<JSObject>(0);
-  Handle<Name> name = args.at<Name>(1);
-  Handle<Object> value = args.at<Object>(2);
+  DCHECK_EQ(5, args.length());
+  // Runtime functions don't follow the IC's calling convention.
+  Handle<Object> value = args.at(0);
+  Handle<Smi> slot = args.at<Smi>(1);
+  Handle<FeedbackVector> vector = args.at<FeedbackVector>(2);
+  Handle<JSObject> receiver = args.at<JSObject>(3);
+  Handle<Name> name = args.at<Name>(4);
+  FeedbackSlot vector_slot = vector->ToSlot(slot->value());
+  LanguageMode language_mode = vector->GetLanguageMode(vector_slot);
 
   DCHECK(receiver->HasNamedInterceptor());
   InterceptorInfo* interceptor = receiver->GetNamedInterceptor();
@@ -3225,7 +3194,7 @@
   DCHECK_EQ(LookupIterator::INTERCEPTOR, it.state());
   it.Next();
 
-  MAYBE_RETURN(Object::SetProperty(&it, value, ic.language_mode(),
+  MAYBE_RETURN(Object::SetProperty(&it, value, language_mode,
                                    JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED),
                isolate->heap()->exception());
   return *value;
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 9e69cc8..c9818f5 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -5,6 +5,8 @@
 #ifndef V8_IC_H_
 #define V8_IC_H_
 
+#include "src/factory.h"
+#include "src/feedback-vector.h"
 #include "src/ic/ic-state.h"
 #include "src/macro-assembler.h"
 #include "src/messages.h"
@@ -45,16 +47,12 @@
   // Clear the inline cache to initial state.
   static void Clear(Isolate* isolate, Address address, Address constant_pool);
 
-#ifdef DEBUG
-  bool IsLoadStub() const {
-    return kind_ == Code::LOAD_IC || kind_ == Code::LOAD_GLOBAL_IC ||
-           kind_ == Code::KEYED_LOAD_IC;
+  bool IsAnyLoad() const {
+    return IsLoadIC() || IsLoadGlobalIC() || IsKeyedLoadIC();
   }
-  bool IsStoreStub() const {
-    return kind_ == Code::STORE_IC || kind_ == Code::KEYED_STORE_IC;
+  bool IsAnyStore() const {
+    return IsStoreIC() || IsStoreOwnIC() || IsKeyedStoreIC();
   }
-  bool IsCallStub() const { return kind_ == Code::CALL_IC; }
-#endif
 
   static inline Handle<Map> GetHandlerCacheHolder(Handle<Map> receiver_map,
                                                   bool receiver_is_holder,
@@ -64,15 +62,15 @@
                                              Isolate* isolate,
                                              CacheHolderFlag* flag);
 
-  static bool IsCleared(FeedbackNexus* nexus) {
-    InlineCacheState state = nexus->StateFromFeedback();
-    return !FLAG_use_ic || state == UNINITIALIZED || state == PREMONOMORPHIC;
-  }
-
   static bool ICUseVector(Code::Kind kind) {
     return kind == Code::LOAD_IC || kind == Code::LOAD_GLOBAL_IC ||
-           kind == Code::KEYED_LOAD_IC || kind == Code::CALL_IC ||
-           kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC;
+           kind == Code::KEYED_LOAD_IC || kind == Code::STORE_IC ||
+           kind == Code::KEYED_STORE_IC;
+  }
+  static bool ICUseVector(FeedbackSlotKind kind) {
+    return IsLoadICKind(kind) || IsLoadGlobalICKind(kind) ||
+           IsKeyedLoadICKind(kind) || IsStoreICKind(kind) ||
+           IsStoreOwnICKind(kind) || IsKeyedStoreICKind(kind);
   }
 
   // The ICs that don't pass slot and vector through the stack have to
@@ -83,15 +81,20 @@
 
   static inline bool IsHandler(Object* object);
 
+  // Notify the IC system that feedback has changed.
+  static void OnFeedbackChanged(Isolate* isolate, JSFunction* host_function);
+
  protected:
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }
+
+  void set_slow_stub_reason(const char* reason) { slow_stub_reason_ = reason; }
+
+  Address GetAbstractPC(int* line, int* column) const;
   Isolate* isolate() const { return isolate_; }
 
-  // Get the shared function info of the caller.
-  SharedFunctionInfo* GetSharedFunctionInfo() const;
-  // Get the code object of the caller.
-  Code* GetCode() const;
+  // Get the caller function object.
+  JSFunction* GetHostFunction() const;
 
   inline bool AddressIsDeoptimizedCode() const;
   inline static bool AddressIsDeoptimizedCode(Isolate* isolate,
@@ -120,7 +123,7 @@
   // keyed stores).
   void ConfigureVectorState(MapHandleList* maps,
                             MapHandleList* transitioned_maps,
-                            CodeHandleList* handlers);
+                            List<Handle<Object>>* handlers);
 
   char TransitionMarkFromState(IC::State state);
   void TraceIC(const char* type, Handle<Object> name);
@@ -136,8 +139,6 @@
                                          Address constant_pool);
   static inline void SetTargetAtAddress(Address address, Code* target,
                                         Address constant_pool);
-  // As a vector-based IC, type feedback must be updated differently.
-  static void OnTypeFeedbackChanged(Isolate* isolate, Code* host);
   static void PostPatching(Address address, Code* target, Code* old_target);
 
   void TraceHandlerCacheHitStats(LookupIterator* lookup);
@@ -165,15 +166,18 @@
   void CopyICToMegamorphicCache(Handle<Name> name);
   bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
   void PatchCache(Handle<Name> name, Handle<Object> code);
-  Code::Kind kind() const { return kind_; }
-  bool is_keyed() const {
-    return kind_ == Code::KEYED_LOAD_IC || kind_ == Code::KEYED_STORE_IC;
-  }
+  FeedbackSlotKind kind() const { return kind_; }
+  bool IsLoadIC() const { return IsLoadICKind(kind_); }
+  bool IsLoadGlobalIC() const { return IsLoadGlobalICKind(kind_); }
+  bool IsKeyedLoadIC() const { return IsKeyedLoadICKind(kind_); }
+  bool IsStoreIC() const { return IsStoreICKind(kind_); }
+  bool IsStoreOwnIC() const { return IsStoreOwnICKind(kind_); }
+  bool IsKeyedStoreIC() const { return IsKeyedStoreICKind(kind_); }
+  bool is_keyed() const { return IsKeyedLoadIC() || IsKeyedStoreIC(); }
   Code::Kind handler_kind() const {
-    if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
-    DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
-           kind_ == Code::KEYED_STORE_IC);
-    return kind_;
+    if (IsAnyLoad()) return Code::LOAD_IC;
+    DCHECK(IsAnyStore());
+    return Code::STORE_IC;
   }
   bool ShouldRecomputeHandler(Handle<String> name);
 
@@ -200,8 +204,8 @@
     return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
   }
 
-  Handle<TypeFeedbackVector> vector() const { return nexus()->vector_handle(); }
-  FeedbackVectorSlot slot() const { return nexus()->slot(); }
+  Handle<FeedbackVector> vector() const { return nexus()->vector_handle(); }
+  FeedbackSlot slot() const { return nexus()->slot(); }
   State saved_state() const {
     return state() == RECOMPUTE_HANDLER ? old_state_ : state();
   }
@@ -212,7 +216,6 @@
   }
   FeedbackNexus* nexus() const { return nexus_; }
 
-  inline Code* get_host();
   inline Code* target() const;
 
  private:
@@ -244,7 +247,7 @@
   bool vector_set_;
   State old_state_;  // For saving if we marked as prototype failure.
   State state_;
-  Code::Kind kind_;
+  FeedbackSlotKind kind_;
   Handle<Map> receiver_map_;
   MaybeHandle<Object> maybe_handler_;
 
@@ -252,6 +255,8 @@
   MapHandleList target_maps_;
   bool target_maps_set_;
 
+  const char* slow_stub_reason_;
+
   FeedbackNexus* nexus_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
@@ -264,38 +269,28 @@
       : IC(EXTRA_CALL_FRAME, isolate, nexus) {
     DCHECK(nexus != NULL);
   }
-
-  void HandleMiss(Handle<Object> function);
-
-  static void Clear(Isolate* isolate, Code* host, CallICNexus* nexus);
 };
 
 
 class LoadIC : public IC {
  public:
-  LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : IC(depth, isolate, nexus) {
+  LoadIC(Isolate* isolate, FeedbackNexus* nexus)
+      : IC(NO_EXTRA_FRAME, isolate, nexus) {
     DCHECK(nexus != NULL);
-    DCHECK(IsLoadStub());
+    DCHECK(IsAnyLoad());
+  }
+
+  static bool ShouldThrowReferenceError(FeedbackSlotKind kind) {
+    return kind == FeedbackSlotKind::kLoadGlobalNotInsideTypeof;
   }
 
   bool ShouldThrowReferenceError() const {
-    return kind() == Code::LOAD_GLOBAL_IC &&
-           LoadGlobalICState::GetTypeofMode(extra_ic_state()) ==
-               NOT_INSIDE_TYPEOF;
+    return ShouldThrowReferenceError(kind());
   }
 
-  // Code generator routines.
-
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Name> name);
 
-  static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
-
  protected:
   virtual Handle<Code> slow_stub() const {
     return isolate()->builtins()->LoadIC_Slow();
@@ -312,7 +307,7 @@
 
  private:
   // Creates a data handler that represents a load of a field by given index.
-  Handle<Object> SimpleFieldLoad(FieldIndex index);
+  static Handle<Object> SimpleFieldLoad(Isolate* isolate, FieldIndex index);
 
   // Creates a data handler that represents a prototype chain check followed
   // by given Smi-handler that encoded a load from the holder.
@@ -325,17 +320,16 @@
   Handle<Object> LoadNonExistent(Handle<Map> receiver_map, Handle<Name> name);
 
   friend class IC;
+  friend class NamedLoadHandlerCompiler;
 };
 
 class LoadGlobalIC : public LoadIC {
  public:
-  LoadGlobalIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : LoadIC(depth, isolate, nexus) {}
+  LoadGlobalIC(Isolate* isolate, FeedbackNexus* nexus)
+      : LoadIC(isolate, nexus) {}
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Name> name);
 
-  static void Clear(Isolate* isolate, Code* host, LoadGlobalICNexus* nexus);
-
  protected:
   Handle<Code> slow_stub() const override {
     return isolate()->builtins()->LoadGlobalIC_Slow();
@@ -344,21 +338,14 @@
 
 class KeyedLoadIC : public LoadIC {
  public:
-  KeyedLoadIC(FrameDepth depth, Isolate* isolate,
-              KeyedLoadICNexus* nexus = NULL)
-      : LoadIC(depth, isolate, nexus) {
+  KeyedLoadIC(Isolate* isolate, KeyedLoadICNexus* nexus)
+      : LoadIC(isolate, nexus) {
     DCHECK(nexus != NULL);
   }
 
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Object> key);
 
-  // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-
-  static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
-
  protected:
   // receiver is HeapObject because it could be a String or a JSObject
   void UpdateLoadElement(Handle<HeapObject> receiver);
@@ -370,20 +357,15 @@
 
 class StoreIC : public IC {
  public:
-  StoreIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
-      : IC(depth, isolate, nexus) {
-    DCHECK(IsStoreStub());
+  StoreIC(Isolate* isolate, FeedbackNexus* nexus)
+      : IC(NO_EXTRA_FRAME, isolate, nexus) {
+    DCHECK(IsAnyStore());
   }
 
   LanguageMode language_mode() const {
-    return StoreICState::GetLanguageMode(extra_ic_state());
+    return nexus()->vector()->GetLanguageMode(nexus()->slot());
   }
 
-  // Code generators for stub routines. Only called once at startup.
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-
   MUST_USE_RESULT MaybeHandle<Object> Store(
       Handle<Object> object, Handle<Name> name, Handle<Object> value,
       JSReceiver::StoreFromKeyed store_mode =
@@ -392,20 +374,11 @@
   bool LookupForWrite(LookupIterator* it, Handle<Object> value,
                       JSReceiver::StoreFromKeyed store_mode);
 
-  static void Clear(Isolate* isolate, Code* host, StoreICNexus* nexus);
-
  protected:
   // Stub accessors.
   Handle<Code> slow_stub() const {
-    switch (language_mode()) {
-      case SLOPPY:
-        return isolate()->builtins()->StoreIC_SlowSloppy();
-      case STRICT:
-        return isolate()->builtins()->StoreIC_SlowStrict();
-      default:
-        UNREACHABLE();
-        return Handle<Code>();
-    }
+    // StoreIC and KeyedStoreIC share the same slow stub.
+    return isolate()->builtins()->KeyedStoreIC_Slow();
   }
 
   // Update the inline cache and the global stub cache based on the
@@ -437,25 +410,13 @@
     return casted_nexus<KeyedStoreICNexus>()->GetKeyedAccessStoreMode();
   }
 
-  KeyedStoreIC(FrameDepth depth, Isolate* isolate,
-               KeyedStoreICNexus* nexus = NULL)
-      : StoreIC(depth, isolate, nexus) {}
+  KeyedStoreIC(Isolate* isolate, KeyedStoreICNexus* nexus)
+      : StoreIC(isolate, nexus) {}
 
   MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
                                             Handle<Object> name,
                                             Handle<Object> value);
 
-  // Code generators for stub routines.  Only called once at startup.
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm,
-                                  LanguageMode language_mode);
-
-  static Handle<Code> ChooseMegamorphicStub(Isolate* isolate,
-                                            ExtraICState extra_state);
-
-  static void Clear(Isolate* isolate, Code* host, KeyedStoreICNexus* nexus);
-
  protected:
   void UpdateStoreElement(Handle<Map> receiver_map,
                           KeyedAccessStoreMode store_mode);
@@ -464,6 +425,14 @@
   Handle<Map> ComputeTransitionedMap(Handle<Map> map,
                                      KeyedAccessStoreMode store_mode);
 
+  Handle<Object> StoreElementHandler(Handle<Map> receiver_map,
+                                     KeyedAccessStoreMode store_mode);
+
+  void StoreElementPolymorphicHandlers(MapHandleList* receiver_maps,
+                                       MapHandleList* transitioned_maps,
+                                       List<Handle<Object>>* handlers,
+                                       KeyedAccessStoreMode store_mode);
+
   friend class IC;
 };
 
diff --git a/src/ic/keyed-store-generic.cc b/src/ic/keyed-store-generic.cc
index 30faba8..8962386 100644
--- a/src/ic/keyed-store-generic.cc
+++ b/src/ic/keyed-store-generic.cc
@@ -4,19 +4,25 @@
 
 #include "src/ic/keyed-store-generic.h"
 
-#include "src/compiler/code-assembler.h"
+#include "src/code-factory.h"
+#include "src/code-stub-assembler.h"
 #include "src/contexts.h"
+#include "src/ic/accessor-assembler.h"
+#include "src/interface-descriptors.h"
 #include "src/isolate.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 using compiler::Node;
 
-class KeyedStoreGenericAssembler : public CodeStubAssembler {
+class KeyedStoreGenericAssembler : public AccessorAssembler {
  public:
-  void KeyedStoreGeneric(const StoreICParameters* p,
-                         LanguageMode language_mode);
+  explicit KeyedStoreGenericAssembler(compiler::CodeAssemblerState* state)
+      : AccessorAssembler(state) {}
+
+  void KeyedStoreGeneric(LanguageMode language_mode);
 
  private:
   enum UpdateLength {
@@ -30,7 +36,8 @@
                                Node* value, Node* context, Label* slow);
 
   void EmitGenericPropertyStore(Node* receiver, Node* receiver_map,
-                                const StoreICParameters* p, Label* slow);
+                                const StoreICParameters* p, Label* slow,
+                                LanguageMode language_mode);
 
   void BranchIfPrototypesHaveNonFastElements(Node* receiver_map,
                                              Label* non_fast_elements,
@@ -60,16 +67,25 @@
                                 ElementsKind packed_kind,
                                 ElementsKind packed_kind_2, Label* bailout);
 
-  // Do not add fields, so that this is safe to reinterpret_cast to CSA.
+  void JumpIfDataProperty(Node* details, Label* writable, Label* readonly);
+  void LookupPropertyOnPrototypeChain(Node* receiver_map, Node* name,
+                                      Label* accessor,
+                                      Variable* var_accessor_pair,
+                                      Variable* var_accessor_holder,
+                                      Label* readonly, Label* bailout);
+
+  void CheckFieldType(Node* descriptors, Node* name_index, Node* representation,
+                      Node* value, Label* bailout);
+  void OverwriteExistingFastProperty(Node* object, Node* object_map,
+                                     Node* properties, Node* descriptors,
+                                     Node* descriptor_name_index, Node* details,
+                                     Node* value, Label* slow);
 };
 
-void KeyedStoreGenericGenerator::Generate(
-    CodeStubAssembler* assembler, const CodeStubAssembler::StoreICParameters* p,
-    LanguageMode language_mode) {
-  STATIC_ASSERT(sizeof(CodeStubAssembler) ==
-                sizeof(KeyedStoreGenericAssembler));
-  auto assm = reinterpret_cast<KeyedStoreGenericAssembler*>(assembler);
-  assm->KeyedStoreGeneric(p, language_mode);
+void KeyedStoreGenericGenerator::Generate(compiler::CodeAssemblerState* state,
+                                          LanguageMode language_mode) {
+  KeyedStoreGenericAssembler assembler(state);
+  assembler.KeyedStoreGeneric(language_mode);
 }
 
 void KeyedStoreGenericAssembler::BranchIfPrototypesHaveNonFastElements(
@@ -94,9 +110,7 @@
            non_fast_elements);
     Node* elements_kind = LoadMapElementsKind(prototype_map);
     STATIC_ASSERT(FIRST_ELEMENTS_KIND == FIRST_FAST_ELEMENTS_KIND);
-    GotoIf(Int32LessThanOrEqual(elements_kind,
-                                Int32Constant(LAST_FAST_ELEMENTS_KIND)),
-           &loop_body);
+    GotoIf(IsFastElementsKind(elements_kind), &loop_body);
     GotoIf(Word32Equal(elements_kind, Int32Constant(NO_ELEMENTS)), &loop_body);
     Goto(non_fast_elements);
   }
@@ -112,7 +126,7 @@
     TrapAllocationMemento(receiver, bailout);
   }
   Label perform_transition(this), check_holey_map(this);
-  Variable var_target_map(this, MachineType::PointerRepresentation());
+  Variable var_target_map(this, MachineRepresentation::kTagged);
   // Check if the receiver has the default |from_kind| map.
   {
     Node* packed_map =
@@ -143,7 +157,7 @@
       GrowElementsCapacity(receiver, elements, from_kind, to_kind, capacity,
                            capacity, INTPTR_PARAMETERS, bailout);
     }
-    StoreObjectField(receiver, JSObject::kMapOffset, var_target_map.value());
+    StoreMap(receiver, var_target_map.value());
   }
 }
 
@@ -160,7 +174,7 @@
   }
   Node* holey_map =
       LoadContextElement(native_context, Context::ArrayMapIndex(holey_kind));
-  StoreObjectField(receiver, JSObject::kMapOffset, holey_map);
+  StoreMap(receiver, holey_map);
   Goto(done);
 }
 
@@ -219,6 +233,15 @@
   if (update_length != kDontChangeLength) {
     CSA_ASSERT(this, Word32Equal(LoadMapInstanceType(receiver_map),
                                  Int32Constant(JS_ARRAY_TYPE)));
+    // Check if the length property is writable. The fast check is only
+    // supported for fast properties.
+    GotoIf(IsDictionaryMap(receiver_map), slow);
+    // The length property is non-configurable, so it's guaranteed to always
+    // be the first property.
+    Node* descriptors = LoadMapDescriptors(receiver_map);
+    Node* details =
+        LoadFixedArrayElement(descriptors, DescriptorArray::ToDetailsIndex(0));
+    GotoIf(IsSetSmi(details, PropertyDetails::kAttributesReadOnlyMask), slow);
   }
   STATIC_ASSERT(FixedArray::kHeaderSize == FixedDoubleArray::kHeaderSize);
   const int kHeaderSize = FixedArray::kHeaderSize - kHeapObjectTag;
@@ -251,7 +274,7 @@
     // can always be stored.
     {
       Label non_smi_value(this);
-      GotoUnless(TaggedIsSmi(value), &non_smi_value);
+      GotoIfNot(TaggedIsSmi(value), &non_smi_value);
       // If we're about to introduce holes, ensure holey elements.
       if (update_length == kBumpLengthWithGap) {
         TryChangeToHoleyMapMulti(receiver, receiver_map, elements_kind, context,
@@ -276,7 +299,7 @@
         TryChangeToHoleyMap(receiver, receiver_map, elements_kind, context,
                             FAST_ELEMENTS, slow);
       }
-      Store(MachineRepresentation::kTagged, elements, offset, value);
+      Store(elements, offset, value);
       MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
 
       Bind(&must_transition);
@@ -320,7 +343,7 @@
                            FAST_SMI_ELEMENTS, target_kind, slow);
         // The elements backing store didn't change, no reload necessary.
         CSA_ASSERT(this, WordEqual(elements, LoadElements(receiver)));
-        Store(MachineRepresentation::kTagged, elements, offset, value);
+        Store(elements, offset, value);
         MaybeUpdateLengthAndReturn(receiver, intptr_index, value,
                                    update_length);
       }
@@ -356,8 +379,8 @@
     // Try to store the value as a double.
     {
       Label non_number_value(this);
-      Node* double_value = PrepareValueForWrite(value, Representation::Double(),
-                                                &non_number_value);
+      Node* double_value = TryTaggedToFloat64(value, &non_number_value);
+
       // Make sure we do not store signalling NaNs into double arrays.
       double_value = Float64SilenceNaN(double_value);
       // If we're about to introduce holes, ensure holey elements.
@@ -384,7 +407,7 @@
       Node* fast_elements = LoadElements(receiver);
       Node* fast_offset = ElementOffsetFromIndex(
           intptr_index, FAST_ELEMENTS, INTPTR_PARAMETERS, kHeaderSize);
-      Store(MachineRepresentation::kTagged, fast_elements, fast_offset, value);
+      Store(fast_elements, fast_offset, value);
       MaybeUpdateLengthAndReturn(receiver, intptr_index, value, update_length);
     }
   }
@@ -399,14 +422,13 @@
 void KeyedStoreGenericAssembler::EmitGenericElementStore(
     Node* receiver, Node* receiver_map, Node* instance_type, Node* intptr_index,
     Node* value, Node* context, Label* slow) {
-  Label if_in_bounds(this), if_increment_length_by_one(this),
+  Label if_fast(this), if_in_bounds(this), if_increment_length_by_one(this),
       if_bump_length_with_gap(this), if_grow(this), if_nonfast(this),
       if_typed_array(this), if_dictionary(this);
   Node* elements = LoadElements(receiver);
   Node* elements_kind = LoadMapElementsKind(receiver_map);
-  GotoIf(
-      Int32GreaterThan(elements_kind, Int32Constant(LAST_FAST_ELEMENTS_KIND)),
-      &if_nonfast);
+  Branch(IsFastElementsKind(elements_kind), &if_fast, &if_nonfast);
+  Bind(&if_fast);
 
   Label if_array(this);
   GotoIf(Word32Equal(instance_type, Int32Constant(JS_ARRAY_TYPE)), &if_array);
@@ -447,6 +469,8 @@
 
   // Out-of-capacity accesses (index >= capacity) jump here. Additionally,
   // an ElementsKind transition might be necessary.
+  // The index can also be negative at this point! Jump to the runtime in that
+  // case to convert it to a named property.
   Bind(&if_grow);
   {
     Comment("Grow backing store");
@@ -482,37 +506,419 @@
   }
 }
 
-void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
-    Node* receiver, Node* receiver_map, const StoreICParameters* p,
-    Label* slow) {
-  Comment("stub cache probe");
-  // TODO(jkummerow): Don't rely on the stub cache as much.
-  // - existing properties can be overwritten inline (unless readonly).
-  // - for dictionary mode receivers, we can even add properties inline
-  //   (unless the prototype chain prevents it).
-  Variable var_handler(this, MachineRepresentation::kTagged);
-  Label found_handler(this, &var_handler), stub_cache_miss(this);
-  TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
-                    &found_handler, &var_handler, &stub_cache_miss);
-  Bind(&found_handler);
+void KeyedStoreGenericAssembler::JumpIfDataProperty(Node* details,
+                                                    Label* writable,
+                                                    Label* readonly) {
+  // Accessor properties never have the READ_ONLY attribute set.
+  GotoIf(IsSetWord32(details, PropertyDetails::kAttributesReadOnlyMask),
+         readonly);
+  Node* kind = DecodeWord32<PropertyDetails::KindField>(details);
+  GotoIf(Word32Equal(kind, Int32Constant(kData)), writable);
+  // Fall through if it's an accessor property.
+}
+
+void KeyedStoreGenericAssembler::LookupPropertyOnPrototypeChain(
+    Node* receiver_map, Node* name, Label* accessor,
+    Variable* var_accessor_pair, Variable* var_accessor_holder, Label* readonly,
+    Label* bailout) {
+  Label ok_to_write(this);
+  Variable var_holder(this, MachineRepresentation::kTagged);
+  var_holder.Bind(LoadMapPrototype(receiver_map));
+  Variable var_holder_map(this, MachineRepresentation::kTagged);
+  var_holder_map.Bind(LoadMap(var_holder.value()));
+
+  Variable* merged_variables[] = {&var_holder, &var_holder_map};
+  Label loop(this, arraysize(merged_variables), merged_variables);
+  Goto(&loop);
+  Bind(&loop);
   {
-    Comment("KeyedStoreGeneric found handler");
-    HandleStoreICHandlerCase(p, var_handler.value(), slow);
+    Node* holder = var_holder.value();
+    Node* holder_map = var_holder_map.value();
+    Node* instance_type = LoadMapInstanceType(holder_map);
+    Label next_proto(this);
+    {
+      Label found(this), found_fast(this), found_dict(this), found_global(this);
+      Variable var_meta_storage(this, MachineRepresentation::kTagged);
+      Variable var_entry(this, MachineType::PointerRepresentation());
+      TryLookupProperty(holder, holder_map, instance_type, name, &found_fast,
+                        &found_dict, &found_global, &var_meta_storage,
+                        &var_entry, &next_proto, bailout);
+      Bind(&found_fast);
+      {
+        Node* descriptors = var_meta_storage.value();
+        Node* name_index = var_entry.value();
+        Node* details =
+            LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+        Variable var_details(this, MachineRepresentation::kWord32);
+        LoadPropertyFromFastObject(holder, holder_map, descriptors, name_index,
+                                   &var_details, var_accessor_pair);
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+
+      Bind(&found_dict);
+      {
+        Node* dictionary = var_meta_storage.value();
+        Node* entry = var_entry.value();
+        Node* details =
+            LoadDetailsByKeyIndex<NameDictionary>(dictionary, entry);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        var_accessor_pair->Bind(
+            LoadValueByKeyIndex<NameDictionary>(dictionary, entry));
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+
+      Bind(&found_global);
+      {
+        Node* dictionary = var_meta_storage.value();
+        Node* entry = var_entry.value();
+        Node* property_cell =
+            LoadValueByKeyIndex<GlobalDictionary>(dictionary, entry);
+        Node* value =
+            LoadObjectField(property_cell, PropertyCell::kValueOffset);
+        GotoIf(WordEqual(value, TheHoleConstant()), &next_proto);
+        Node* details = LoadAndUntagToWord32ObjectField(
+            property_cell, PropertyCell::kDetailsOffset);
+        JumpIfDataProperty(details, &ok_to_write, readonly);
+
+        // Accessor case.
+        var_accessor_pair->Bind(value);
+        var_accessor_holder->Bind(holder);
+        Goto(accessor);
+      }
+    }
+
+    Bind(&next_proto);
+    // Bailout if it can be an integer indexed exotic case.
+    GotoIf(Word32Equal(instance_type, Int32Constant(JS_TYPED_ARRAY_TYPE)),
+           bailout);
+    Node* proto = LoadMapPrototype(holder_map);
+    GotoIf(WordEqual(proto, NullConstant()), &ok_to_write);
+    var_holder.Bind(proto);
+    var_holder_map.Bind(LoadMap(proto));
+    Goto(&loop);
   }
-  Bind(&stub_cache_miss);
+  Bind(&ok_to_write);
+}
+
+void KeyedStoreGenericAssembler::CheckFieldType(Node* descriptors,
+                                                Node* name_index,
+                                                Node* representation,
+                                                Node* value, Label* bailout) {
+  Label r_smi(this), r_double(this), r_heapobject(this), all_fine(this);
+  // Ignore FLAG_track_fields etc. and always emit code for all checks,
+  // because this builtin is part of the snapshot and therefore should
+  // be flag independent.
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kSmi)),
+         &r_smi);
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+         &r_double);
+  GotoIf(
+      Word32Equal(representation, Int32Constant(Representation::kHeapObject)),
+      &r_heapobject);
+  GotoIf(Word32Equal(representation, Int32Constant(Representation::kNone)),
+         bailout);
+  CSA_ASSERT(this, Word32Equal(representation,
+                               Int32Constant(Representation::kTagged)));
+  Goto(&all_fine);
+
+  Bind(&r_smi);
+  { Branch(TaggedIsSmi(value), &all_fine, bailout); }
+
+  Bind(&r_double);
   {
-    Comment("KeyedStoreGeneric_miss");
-    TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value, p->slot,
-                    p->vector, p->receiver, p->name);
+    GotoIf(TaggedIsSmi(value), &all_fine);
+    Node* value_map = LoadMap(value);
+    // While supporting mutable HeapNumbers would be straightforward, such
+    // objects should not end up here anyway.
+    CSA_ASSERT(this,
+               WordNotEqual(value_map,
+                            LoadRoot(Heap::kMutableHeapNumberMapRootIndex)));
+    Branch(IsHeapNumberMap(value_map), &all_fine, bailout);
+  }
+
+  Bind(&r_heapobject);
+  {
+    GotoIf(TaggedIsSmi(value), bailout);
+    Node* field_type =
+        LoadValueByKeyIndex<DescriptorArray>(descriptors, name_index);
+    intptr_t kNoneType = reinterpret_cast<intptr_t>(FieldType::None());
+    intptr_t kAnyType = reinterpret_cast<intptr_t>(FieldType::Any());
+    // FieldType::None can't hold any value.
+    GotoIf(WordEqual(field_type, IntPtrConstant(kNoneType)), bailout);
+    // FieldType::Any can hold any value.
+    GotoIf(WordEqual(field_type, IntPtrConstant(kAnyType)), &all_fine);
+    CSA_ASSERT(this, IsWeakCell(field_type));
+    // Cleared WeakCells count as FieldType::None, which can't hold any value.
+    field_type = LoadWeakCellValue(field_type, bailout);
+    // FieldType::Class(...) performs a map check.
+    CSA_ASSERT(this, IsMap(field_type));
+    Branch(WordEqual(LoadMap(value), field_type), &all_fine, bailout);
+  }
+
+  Bind(&all_fine);
+}
+
+void KeyedStoreGenericAssembler::OverwriteExistingFastProperty(
+    Node* object, Node* object_map, Node* properties, Node* descriptors,
+    Node* descriptor_name_index, Node* details, Node* value, Label* slow) {
+  // Properties in descriptors can't be overwritten without map transition.
+  GotoIf(Word32NotEqual(DecodeWord32<PropertyDetails::LocationField>(details),
+                        Int32Constant(kField)),
+         slow);
+
+  if (FLAG_track_constant_fields) {
+    // TODO(ishell): Taking the slow path is not necessary if new and old
+    // values are identical.
+    GotoIf(Word32Equal(DecodeWord32<PropertyDetails::ConstnessField>(details),
+                       Int32Constant(kConst)),
+           slow);
+  }
+
+  Label done(this);
+  Node* representation =
+      DecodeWord32<PropertyDetails::RepresentationField>(details);
+
+  CheckFieldType(descriptors, descriptor_name_index, representation, value,
+                 slow);
+  Node* field_index =
+      DecodeWordFromWord32<PropertyDetails::FieldIndexField>(details);
+  Node* inobject_properties = LoadMapInobjectProperties(object_map);
+
+  Label inobject(this), backing_store(this);
+  Branch(UintPtrLessThan(field_index, inobject_properties), &inobject,
+         &backing_store);
+
+  Bind(&inobject);
+  {
+    Node* field_offset =
+        IntPtrMul(IntPtrSub(LoadMapInstanceSize(object_map),
+                            IntPtrSub(inobject_properties, field_index)),
+                  IntPtrConstant(kPointerSize));
+    Label tagged_rep(this), double_rep(this);
+    Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+           &double_rep, &tagged_rep);
+    Bind(&double_rep);
+    {
+      Node* double_value = ChangeNumberToFloat64(value);
+      if (FLAG_unbox_double_fields) {
+        StoreObjectFieldNoWriteBarrier(object, field_offset, double_value,
+                                       MachineRepresentation::kFloat64);
+      } else {
+        Node* mutable_heap_number = LoadObjectField(object, field_offset);
+        StoreHeapNumberValue(mutable_heap_number, double_value);
+      }
+      Goto(&done);
+    }
+
+    Bind(&tagged_rep);
+    {
+      StoreObjectField(object, field_offset, value);
+      Goto(&done);
+    }
+  }
+
+  Bind(&backing_store);
+  {
+    Node* backing_store_index = IntPtrSub(field_index, inobject_properties);
+    Label tagged_rep(this), double_rep(this);
+    Branch(Word32Equal(representation, Int32Constant(Representation::kDouble)),
+           &double_rep, &tagged_rep);
+    Bind(&double_rep);
+    {
+      Node* double_value = ChangeNumberToFloat64(value);
+      Node* mutable_heap_number =
+          LoadFixedArrayElement(properties, backing_store_index);
+      StoreHeapNumberValue(mutable_heap_number, double_value);
+      Goto(&done);
+    }
+    Bind(&tagged_rep);
+    {
+      StoreFixedArrayElement(properties, backing_store_index, value);
+      Goto(&done);
+    }
+  }
+  Bind(&done);
+}
+
+void KeyedStoreGenericAssembler::EmitGenericPropertyStore(
+    Node* receiver, Node* receiver_map, const StoreICParameters* p, Label* slow,
+    LanguageMode language_mode) {
+  Variable var_accessor_pair(this, MachineRepresentation::kTagged);
+  Variable var_accessor_holder(this, MachineRepresentation::kTagged);
+  Label stub_cache(this), fast_properties(this), dictionary_properties(this),
+      accessor(this), readonly(this);
+  Node* properties = LoadProperties(receiver);
+  Node* properties_map = LoadMap(properties);
+  Branch(WordEqual(properties_map, LoadRoot(Heap::kHashTableMapRootIndex)),
+         &dictionary_properties, &fast_properties);
+
+  Bind(&fast_properties);
+  {
+    Comment("fast property store");
+    Node* bitfield3 = LoadMapBitField3(receiver_map);
+    Node* descriptors = LoadMapDescriptors(receiver_map);
+    Label descriptor_found(this);
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    // TODO(jkummerow): Maybe look for existing map transitions?
+    Label* notfound = &stub_cache;
+    DescriptorLookup(p->name, descriptors, bitfield3, &descriptor_found,
+                     &var_name_index, notfound);
+
+    Bind(&descriptor_found);
+    {
+      Node* name_index = var_name_index.value();
+      Node* details =
+          LoadDetailsByKeyIndex<DescriptorArray>(descriptors, name_index);
+      Label data_property(this);
+      JumpIfDataProperty(details, &data_property, &readonly);
+
+      // Accessor case.
+      // TODO(jkummerow): Implement a trimmed-down LoadAccessorFromFastObject.
+      Variable var_details(this, MachineRepresentation::kWord32);
+      LoadPropertyFromFastObject(receiver, receiver_map, descriptors,
+                                 name_index, &var_details, &var_accessor_pair);
+      var_accessor_holder.Bind(receiver);
+      Goto(&accessor);
+
+      Bind(&data_property);
+      {
+        OverwriteExistingFastProperty(receiver, receiver_map, properties,
+                                      descriptors, name_index, details,
+                                      p->value, slow);
+        Return(p->value);
+      }
+    }
+  }
+
+  Bind(&dictionary_properties);
+  {
+    Comment("dictionary property store");
+    // We checked for LAST_CUSTOM_ELEMENTS_RECEIVER before, which rules out
+    // seeing global objects here (which would need special handling).
+
+    Variable var_name_index(this, MachineType::PointerRepresentation());
+    Label dictionary_found(this, &var_name_index), not_found(this);
+    NameDictionaryLookup<NameDictionary>(properties, p->name, &dictionary_found,
+                                         &var_name_index, &not_found);
+    Bind(&dictionary_found);
+    {
+      Label overwrite(this);
+      Node* details = LoadDetailsByKeyIndex<NameDictionary>(
+          properties, var_name_index.value());
+      JumpIfDataProperty(details, &overwrite, &readonly);
+
+      // Accessor case.
+      var_accessor_pair.Bind(LoadValueByKeyIndex<NameDictionary>(
+          properties, var_name_index.value()));
+      var_accessor_holder.Bind(receiver);
+      Goto(&accessor);
+
+      Bind(&overwrite);
+      {
+        StoreValueByKeyIndex<NameDictionary>(properties, var_name_index.value(),
+                                             p->value);
+        Return(p->value);
+      }
+    }
+
+    Bind(&not_found);
+    {
+      LookupPropertyOnPrototypeChain(receiver_map, p->name, &accessor,
+                                     &var_accessor_pair, &var_accessor_holder,
+                                     &readonly, slow);
+      Add<NameDictionary>(properties, p->name, p->value, slow);
+      Return(p->value);
+    }
+  }
+
+  Bind(&accessor);
+  {
+    Label not_callable(this);
+    Node* accessor_pair = var_accessor_pair.value();
+    GotoIf(IsAccessorInfoMap(LoadMap(accessor_pair)), slow);
+    CSA_ASSERT(this, HasInstanceType(accessor_pair, ACCESSOR_PAIR_TYPE));
+    Node* setter = LoadObjectField(accessor_pair, AccessorPair::kSetterOffset);
+    Node* setter_map = LoadMap(setter);
+    // FunctionTemplateInfo setters are not supported yet.
+    GotoIf(IsFunctionTemplateInfoMap(setter_map), slow);
+    GotoIfNot(IsCallableMap(setter_map), &not_callable);
+
+    Callable callable = CodeFactory::Call(isolate());
+    CallJS(callable, p->context, setter, receiver, p->value);
+    Return(p->value);
+
+    Bind(&not_callable);
+    {
+      if (language_mode == STRICT) {
+        Node* message =
+            SmiConstant(Smi::FromInt(MessageTemplate::kNoSetterInCallback));
+        TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+                        var_accessor_holder.value());
+      } else {
+        DCHECK_EQ(SLOPPY, language_mode);
+        Return(p->value);
+      }
+    }
+  }
+
+  Bind(&readonly);
+  {
+    if (language_mode == STRICT) {
+      Node* message =
+          SmiConstant(Smi::FromInt(MessageTemplate::kStrictReadOnlyProperty));
+      Node* type = Typeof(p->receiver, p->context);
+      TailCallRuntime(Runtime::kThrowTypeError, p->context, message, p->name,
+                      type, p->receiver);
+    } else {
+      DCHECK_EQ(SLOPPY, language_mode);
+      Return(p->value);
+    }
+  }
+
+  Bind(&stub_cache);
+  {
+    Comment("stub cache probe");
+    Variable var_handler(this, MachineRepresentation::kTagged);
+    Label found_handler(this, &var_handler), stub_cache_miss(this);
+    TryProbeStubCache(isolate()->store_stub_cache(), receiver, p->name,
+                      &found_handler, &var_handler, &stub_cache_miss);
+    Bind(&found_handler);
+    {
+      Comment("KeyedStoreGeneric found handler");
+      HandleStoreICHandlerCase(p, var_handler.value(), &stub_cache_miss);
+    }
+    Bind(&stub_cache_miss);
+    {
+      Comment("KeyedStoreGeneric_miss");
+      TailCallRuntime(Runtime::kKeyedStoreIC_Miss, p->context, p->value,
+                      p->slot, p->vector, p->receiver, p->name);
+    }
   }
 }
 
-void KeyedStoreGenericAssembler::KeyedStoreGeneric(const StoreICParameters* p,
-                                                   LanguageMode language_mode) {
+void KeyedStoreGenericAssembler::KeyedStoreGeneric(LanguageMode language_mode) {
+  typedef StoreWithVectorDescriptor Descriptor;
+
+  Node* receiver = Parameter(Descriptor::kReceiver);
+  Node* name = Parameter(Descriptor::kName);
+  Node* value = Parameter(Descriptor::kValue);
+  Node* slot = Parameter(Descriptor::kSlot);
+  Node* vector = Parameter(Descriptor::kVector);
+  Node* context = Parameter(Descriptor::kContext);
+
   Variable var_index(this, MachineType::PointerRepresentation());
+  Variable var_unique(this, MachineRepresentation::kTagged);
+  var_unique.Bind(name);  // Dummy initialization.
   Label if_index(this), if_unique_name(this), slow(this);
 
-  Node* receiver = p->receiver;
   GotoIf(TaggedIsSmi(receiver), &slow);
   Node* receiver_map = LoadMap(receiver);
   Node* instance_type = LoadMapInstanceType(receiver_map);
@@ -522,26 +928,28 @@
                               Int32Constant(LAST_CUSTOM_ELEMENTS_RECEIVER)),
          &slow);
 
-  TryToName(p->name, &if_index, &var_index, &if_unique_name, &slow);
+  TryToName(name, &if_index, &var_index, &if_unique_name, &var_unique, &slow);
 
   Bind(&if_index);
   {
     Comment("integer index");
     EmitGenericElementStore(receiver, receiver_map, instance_type,
-                            var_index.value(), p->value, p->context, &slow);
+                            var_index.value(), value, context, &slow);
   }
 
   Bind(&if_unique_name);
   {
     Comment("key is unique name");
-    EmitGenericPropertyStore(receiver, receiver_map, p, &slow);
+    StoreICParameters p(context, receiver, var_unique.value(), value, slot,
+                        vector);
+    EmitGenericPropertyStore(receiver, receiver_map, &p, &slow, language_mode);
   }
 
   Bind(&slow);
   {
     Comment("KeyedStoreGeneric_slow");
-    TailCallRuntime(Runtime::kSetProperty, p->context, p->receiver, p->name,
-                    p->value, SmiConstant(language_mode));
+    TailCallRuntime(Runtime::kSetProperty, context, receiver, name, value,
+                    SmiConstant(language_mode));
   }
 }
 
diff --git a/src/ic/keyed-store-generic.h b/src/ic/keyed-store-generic.h
index daeb61f..8028736 100644
--- a/src/ic/keyed-store-generic.h
+++ b/src/ic/keyed-store-generic.h
@@ -5,15 +5,18 @@
 #ifndef V8_SRC_IC_KEYED_STORE_GENERIC_H_
 #define V8_SRC_IC_KEYED_STORE_GENERIC_H_
 
-#include "src/code-stub-assembler.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class CodeAssemblerState;
+}
+
 class KeyedStoreGenericGenerator {
  public:
-  static void Generate(CodeStubAssembler* assembler,
-                       const CodeStubAssembler::StoreICParameters* p,
+  static void Generate(compiler::CodeAssemblerState* state,
                        LanguageMode language_mode);
 };
 
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
index b2ddea5..c14652c 100644
--- a/src/ic/mips/handler-compiler-mips.cc
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -129,14 +129,6 @@
   __ Addu(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,27 +173,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ lw(result,
-        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, scratch1);
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -219,24 +190,18 @@
   __ Branch(miss, ne, scratch, Operand(at));
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -342,57 +307,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ And(at, scratch, Operand(Map::Deprecated::kMask));
-    __ Branch(miss, ne, at, Operand(zero_reg));
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ lw(scratch,
-        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ lw(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    // Compare map directly within the Branch() functions.
-    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
-    __ Branch(miss_label, ne, map_reg, Operand(scratch));
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +434,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ li(v0, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -591,8 +497,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc
deleted file mode 100644
index 86a602b..0000000
--- a/src/ic/mips/ic-compiler-mips.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ li(a0, Operand(Smi::FromInt(language_mode)));
-  __ Push(a0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 561c9d3..fd39972 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -6,528 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1,
-         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  __ lw(result,
-        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ sw(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = a0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                    JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), v0, a3, t0);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = t0;
-  Register scratch2 = t4;
-  Register scratch3 = t5;
-  Register address = t1;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, scratch2, scratch3, address));
-
-  if (check_map == kCheckMap) {
-    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&holecheck_passed1, ne, scratch,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ sw(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
-  __ sw(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
-                                     kHoleNanUpper32Offset - kHeapObjectTag));
-  __ Lsa(address, address, key, kPointerSizeLog2);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch,
-            Operand(kHoleNanUpper32));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
-                                 scratch3, &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, scratch, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(value.is(a0));
-  Register receiver_map = a3;
-  Register elements_map = t2;
-  Register elements = t3;  // Elements array of the receiver.
-  // t0 and t1 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(t0));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(t0, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
-                                                     t2, t4, t5);
-  // Cache miss.
-  __ Branch(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  // Only support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(t0));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(t0));
-  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&check_if_double_array, ne, elements_map,
-            Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(t0));
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = t1;
-  DCHECK(receiver.is(a1));
-  DCHECK(name.is(a2));
-  DCHECK(value.is(a0));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(a3));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(t0));
-
-  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +69,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(andi_instruction_address), delta);
+    LOG(isolate, PatchIC(address, andi_instruction_address, delta));
   }
 
   Address patch_address =
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
deleted file mode 100644
index d476c1e..0000000
--- a/src/ic/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ Lsa(offset_scratch, offset, offset, 1);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ Addu(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ lw(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check register validity.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ Subu(scratch, scratch, name);
-  __ Addu(scratch, scratch, Operand(kSecondaryMagic));
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
index 249f8fe..1a38d32 100644
--- a/src/ic/mips64/handler-compiler-mips64.cc
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -129,14 +129,6 @@
   __ Daddu(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in ra register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -181,27 +173,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ ld(result,
-        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, scratch1);
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -219,24 +190,18 @@
   __ Branch(miss, ne, scratch, Operand(at));
 }
 
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name, receiver, holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -342,57 +307,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ li(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lwu(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ And(at, scratch, Operand(Map::Deprecated::kMask));
-    __ Branch(miss, ne, at, Operand(zero_reg));
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ ld(scratch,
-        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch));
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ ld(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    // Compare map directly within the Branch() functions.
-    __ GetWeakValue(scratch, Map::WeakCellForMap(field_type->AsClass()));
-    __ Branch(miss_label, ne, map_reg, Operand(scratch));
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -520,14 +434,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ li(v0, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -591,8 +497,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc
deleted file mode 100644
index 276f3af..0000000
--- a/src/ic/mips64/ic-compiler-mips64.cc
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
-
-  __ li(a0, Operand(Smi::FromInt(language_mode)));
-  __ Push(a0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index 57efa35..0e2032a 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -6,529 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1,
-         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  __ ld(result,
-        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address returned from GenerateStringDictionaryProbes() in scratch2
-// is used.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY));
-  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ sd(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = a0;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-  Label slow;
-
-  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                    JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), v0, a3, a4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return a3; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in ra.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in ra.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = a4;
-  Register scratch2 = t0;
-  Register address = a5;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, scratch2, address));
-
-  if (check_map == kCheckMap) {
-    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ ld(scratch, MemOperand(address));
-
-  __ Branch(&holecheck_passed1, ne, scratch,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ Daddu(address, elements,
-           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag));
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch,
-            Operand(static_cast<int32_t>(kHoleNanUpper32)));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, scratch, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(value.is(a0));
-  Register receiver_map = a3;
-  Register elements_map = a6;
-  Register elements = a7;  // Elements array of the receiver.
-  // a4 and a5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&slow, ne, a4, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(a4));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(a4, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-
-  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
-                                                     a6, a7, t0);
-  // Cache miss.
-  __ Branch(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  // Only support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(a4));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(a4));
-  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&check_if_double_array, ne, elements_map,
-            Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(a4));
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = a5;
-  DCHECK(!AreAliased(
-      value, receiver, name, StoreWithVectorDescriptor::VectorRegister(),
-      StoreWithVectorDescriptor::SlotRegister(), dictionary, a6, a7));
-
-  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -586,9 +69,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(andi_instruction_address), delta);
+    LOG(isolate, PatchIC(address, andi_instruction_address, delta));
   }
 
   Address patch_address =
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
deleted file mode 100644
index 6a87b7b..0000000
--- a/src/ic/mips64/stub-cache-mips64.cc
+++ /dev/null
@@ -1,161 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_MIPS64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
-  uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
-  uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ Dlsa(offset_scratch, offset, offset, 1);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ Dlsa(base_addr, base_addr, offset_scratch,
-          kPointerSizeLog2 - StubCache::kCacheIndexShift);
-
-  // Check that the key in the entry matches the name.
-  __ ld(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ ld(at, MemOperand(base_addr,
-                       static_cast<int32_t>(map_off_addr - key_off_addr)));
-  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ld(code, MemOperand(base_addr,
-                         static_cast<int32_t>(value_off_addr - key_off_addr)));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  // DCHECK(sizeof(Entry) == 12);
-  // DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check register validity.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lwu(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  __ And(scratch, scratch,
-         Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ Subu(scratch, scratch, name);
-  __ Addu(scratch, scratch, kSecondaryMagic);
-  __ And(scratch, scratch,
-         Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/ppc/handler-compiler-ppc.cc b/src/ic/ppc/handler-compiler-ppc.cc
index e0caaa6..3da558d 100644
--- a/src/ic/ppc/handler-compiler-ppc.cc
+++ b/src/ic/ppc/handler-compiler-ppc.cc
@@ -130,14 +130,6 @@
   __ addi(sp, sp, Operand(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -184,27 +176,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ LoadP(result,
-           FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mr(r3, scratch1);
-  __ Ret();
-}
-
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -224,25 +195,18 @@
 }
 
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ push(name);
-  __ push(receiver);
-  __ push(holder);
-}
-
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -350,58 +314,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ lwz(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ DecodeField<Map::Deprecated>(r0, scratch, SetRC);
-    __ bne(miss, cr0);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ LoadP(scratch, FieldMemOperand(
-                        scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ bne(miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -538,14 +450,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r3, value);
-  __ Ret();
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -610,8 +514,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/ppc/ic-compiler-ppc.cc b/src/ic/ppc/ic-compiler-ppc.cc
deleted file mode 100644
index c6b36f2..0000000
--- a/src/ic/ppc/ic-compiler-ppc.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(), r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_PPC
diff --git a/src/ic/ppc/ic-ppc.cc b/src/ic/ppc/ic-ppc.cc
index 359a6a4..0f25846 100644
--- a/src/ic/ppc/ic-ppc.cc
+++ b/src/ic/ppc/ic-ppc.cc
@@ -6,527 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ mr(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ and_(scratch2, scratch1, scratch2, SetRC);
-  __ bne(miss, cr0);
-  __ mr(scratch2, r0);
-
-  // Get the value at the masked, scaled index and return.
-  __ LoadP(result,
-           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ mr(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
-  __ and_(scratch2, scratch1, scratch2, SetRC);
-  __ bne(miss, cr0);
-  __ mr(scratch2, r0);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ StoreP(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mr(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r3;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                       JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r3, r6, r7);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r6; }
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r7, r8);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r7, r8);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r7;
-  Register address = r8;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ cmp(elements_map, scratch);
-    __ bne(fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ LoadPX(scratch, MemOperand(address, scratch));
-  __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
-  __ bne(&holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StorePX(value, MemOperand(address, scratch));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StorePUX(value, MemOperand(address, scratch));
-  // Update write barrier for the elements array address.
-  __ mr(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ bne(slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ addi(address, elements,
-          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag)));
-  __ SmiToDoubleArrayOffset(scratch, key);
-  __ lwzx(scratch, MemOperand(address, scratch));
-  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
-  __ bne(&fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ bne(&non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r3     : value
-  //  -- r4     : key
-  //  -- r5     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r4));
-  DCHECK(key.is(r5));
-  DCHECK(value.is(r3));
-  Register receiver_map = r6;
-  Register elements_map = r9;
-  Register elements = r10;  // Elements array of the receiver.
-  // r7 and r8 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ andi(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ bne(&slow, cr0);
-  // Check if the object is a JS array or not.
-  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
-  __ beq(&array);
-  // Check that the object is some kind of JSObject.
-  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
-  __ blt(&slow);
-
-  // Object case: Check key against length in the elements array.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ blt(&fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r3: value.
-  // r4: key.
-  // r5: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r7, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r8,
-                                                     r9, r10, r11);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ bne(&slow);  // Only support writing to writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ bge(&slow);
-  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ cmp(elements_map, ip);  // PPC - I think I can re-use ip here
-  __ bne(&check_if_double_array);
-  __ b(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ cmp(elements_map, ip);  // PPC - another ip re-use
-  __ bne(&slow);
-  __ b(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ cmpl(key, ip);
-  __ bge(&extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r8;
-  DCHECK(receiver.is(r4));
-  DCHECK(name.is(r5));
-  DCHECK(value.is(r3));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r6));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r7));
-
-  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r9, r10);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r9, r10);
-  GenerateMiss(masm);
-}
-
-
-#undef __
-
-
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
     case Token::EQ_STRICT:
@@ -585,9 +70,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   Address patch_address =
diff --git a/src/ic/ppc/stub-cache-ppc.cc b/src/ic/ppc/stub-cache-ppc.cc
deleted file mode 100644
index 3dad306..0000000
--- a/src/ic/ppc/stub-cache-ppc.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_PPC
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ ShiftLeftImm(offset_scratch, offset, Operand(1));
-  __ add(offset_scratch, offset, offset_scratch);
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_PPC64
-  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
-  __ ShiftLeftImm(offset_scratch, offset_scratch,
-                  Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
-  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
-  __ add(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ LoadP(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ bne(&miss);
-
-  // Check the map matches.
-  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ bne(&miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ b(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ b(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ mtctr(r0);
-  __ bctr();
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-#if V8_TARGET_ARCH_PPC64
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  DCHECK(sizeof(Entry) == 24);
-#else
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-#endif
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, ip);
-  __ Xor(scratch, scratch, Operand(kPrimaryMagic));
-  // The mask omits the last two bits because they are not part of the hash.
-  __ andi(scratch, scratch,
-          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, name);
-  __ Add(scratch, scratch, kSecondaryMagic, r0);
-  __ andi(scratch, scratch,
-          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_PPC
diff --git a/src/ic/s390/handler-compiler-s390.cc b/src/ic/s390/handler-compiler-s390.cc
index 72658ec..9f08797 100644
--- a/src/ic/s390/handler-compiler-s390.cc
+++ b/src/ic/s390/handler-compiler-s390.cc
@@ -125,14 +125,6 @@
   __ la(sp, MemOperand(sp, 2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  // No-op. Return address is in lr register.
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -177,24 +169,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
 }
 
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ LoadP(result,
-           FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
-}
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ LoadRR(r2, scratch1);
-  __ Ret();
-}
-
 // Generate code to check that a global property cell is empty. Create
 // the property cell at compilation time if no cell exists for the
 // property.
@@ -212,24 +186,18 @@
   __ bne(miss);
 }
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
-  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
-  __ Push(name);
-  __ Push(receiver);
-  __ Push(holder);
-}
-
 static void CompileCallLoadPropertyWithInterceptor(
     MacroAssembler* masm, Register receiver, Register holder, Register name,
     Handle<JSObject> holder_obj, Runtime::FunctionId id) {
   DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
          Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name, receiver, holder);
+
   __ CallRuntime(id);
 }
 
@@ -335,54 +303,6 @@
   }
 }
 
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Operand(name));
-}
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ LoadlW(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
-    __ DecodeField<Map::Deprecated>(r0, scratch);
-    __ bne(miss);
-  }
-}
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ CmpP(value_reg, FieldMemOperand(
-                         scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ bne(miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ LoadP(map_reg, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ bne(miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -512,12 +432,6 @@
   }
 }
 
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r2, value);
-  __ Ret();
-}
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -580,8 +494,18 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(name(), receiver(), holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot(), vector());
+  } else {
+    __ Push(scratch3(), scratch2());  // slot, vector
+  }
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/s390/ic-compiler-s390.cc b/src/ic/s390/ic-compiler-s390.cc
deleted file mode 100644
index a7691d8..0000000
--- a/src/ic/s390/ic-compiler-s390.cc
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  __ mov(r0, Operand(Smi::FromInt(language_mode)));
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister(), r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/s390/ic-s390.cc b/src/ic/s390/ic-s390.cc
index bd83af1..494a4cd 100644
--- a/src/ic/s390/ic-s390.cc
+++ b/src/ic/s390/ic-s390.cc
@@ -6,514 +6,11 @@
 
 #include "src/ic/ic.h"
 #include "src/codegen.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
-                                   Register elements, Register name,
-                                   Register result, Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ LoadRR(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ AndP(scratch2, scratch1);
-  __ bne(miss);
-  __ LoadRR(scratch2, r0);
-
-  // Get the value at the masked, scaled index and return.
-  __ LoadP(result,
-           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
-                                    Register elements, Register name,
-                                    Register value, Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
-                                                   name, scratch1, scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ LoadRR(r0, scratch2);
-  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
-  __ AndP(scratch2, scratch1);
-  __ bne(miss /*, cr0*/);
-  __ LoadRR(scratch2, r0);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ StoreP(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ LoadRR(scratch1, value);
-  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = r2;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
-                                       JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), r2, r5, r6);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return r5; }
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-
-  __ Push(receiver, name, slot, vector);
-}
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is in lr.
-  Isolate* isolate = masm->isolate();
-
-  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
-                     LoadWithVectorDescriptor::VectorRegister()));
-  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is in lr.
-
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  __ Push(StoreWithVectorDescriptor::ValueRegister(),
-          StoreWithVectorDescriptor::SlotRegister(),
-          StoreWithVectorDescriptor::VectorRegister(),
-          StoreWithVectorDescriptor::ReceiverRegister(),
-          StoreWithVectorDescriptor::NameRegister());
-}
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = r6;
-  Register address = r7;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, address));
-
-  if (check_map == kCheckMap) {
-    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ CmpP(elements_map,
-            Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ bne(fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  // @TODO(joransiu) : Fold AddP into memref of LoadP
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ LoadP(scratch, MemOperand(address, scratch));
-  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
-  __ bne(&holecheck_passed1, Label::kNear);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StoreP(value, MemOperand(address, scratch));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiToPtrArrayOffset(scratch, key);
-  __ StoreP(value, MemOperand(address, scratch));
-  __ la(address, MemOperand(address, scratch));
-  // Update write barrier for the elements array address.
-  __ LoadRR(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ bne(slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  // @TODO(joransiu) : Fold AddP Operand into LoadlW
-  __ AddP(address, elements,
-          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag)));
-  __ SmiToDoubleArrayOffset(scratch, key);
-  __ LoadlW(scratch, MemOperand(address, scratch));
-  __ CmpP(scratch, Operand(kHoleNanUpper32));
-  __ bne(&fast_double_without_map_check, Label::kNear);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
-    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
-  __ bne(&non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ b(&finish_object_store);
-}
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- r2     : value
-  //  -- r3     : key
-  //  -- r4     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(receiver.is(r3));
-  DCHECK(key.is(r4));
-  DCHECK(value.is(r2));
-  Register receiver_map = r5;
-  Register elements_map = r8;
-  Register elements = r9;  // Elements array of the receiver.
-  // r6 and r7 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ bne(&slow, Label::kNear);
-  // Check if the object is a JS array or not.
-  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
-  __ beq(&array);
-  // Check that the object is some kind of JSObject.
-  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
-  __ blt(&slow, Label::kNear);
-
-  // Object case: Check key against length in the elements array.
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ blt(&fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r2: value.
-  // r3: key.
-  // r4: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r6, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
-                                                     r8, r9, ip);
-  // Cache miss.
-  __ b(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ bne(&slow);  // Only support writing to writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ bge(&slow);
-  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ bne(&check_if_double_array, Label::kNear);
-  __ b(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ CmpP(elements_map,
-          Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ bne(&slow);
-  __ b(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ bge(&extra);
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Label miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r7;
-  DCHECK(receiver.is(r3));
-  DCHECK(name.is(r4));
-  DCHECK(value.is(r2));
-  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r5));
-  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r6));
-
-  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
-  GenerateMiss(masm);
-}
-
-#undef __
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -573,9 +70,7 @@
   }
 
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(cmp_instruction_address), delta);
+    LOG(isolate, PatchIC(address, cmp_instruction_address, delta));
   }
 
   // Expected sequence to enable by changing the following
@@ -624,13 +119,13 @@
     cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
     DCHECK((cc == ne) || (cc == eq));
     cc = (cc == ne) ? eq : ne;
-    patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
+    patcher.masm()->brc(cc, Operand(branch_instr & 0xffff));
   } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
     cc = static_cast<Condition>(
         (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
     DCHECK((cc == ne) || (cc == eq));
     cc = (cc == ne) ? eq : ne;
-    patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
+    patcher.masm()->brcl(cc, Operand(branch_instr & 0xffffffff));
   } else {
     DCHECK(false);
   }
diff --git a/src/ic/s390/stub-cache-s390.cc b/src/ic/s390/stub-cache-s390.cc
deleted file mode 100644
index a0564a3..0000000
--- a/src/ic/s390/stub-cache-s390.cc
+++ /dev/null
@@ -1,173 +0,0 @@
-// Copyright 2015 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_S390
-
-#include "src/ic/stub-cache.h"
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register scratch, Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  DCHECK(value_off_addr > key_off_addr);
-  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
-  DCHECK(map_off_addr > key_off_addr);
-  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
-  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ ShiftLeftP(offset_scratch, offset, Operand(1));
-  __ AddP(offset_scratch, offset, offset_scratch);
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-#if V8_TARGET_ARCH_S390X
-  DCHECK(kPointerSizeLog2 > StubCache::kCacheIndexShift);
-  __ ShiftLeftP(offset_scratch, offset_scratch,
-                Operand(kPointerSizeLog2 - StubCache::kCacheIndexShift));
-#else
-  DCHECK(kPointerSizeLog2 == StubCache::kCacheIndexShift);
-#endif
-  __ AddP(base_addr, base_addr, offset_scratch);
-
-  // Check that the key in the entry matches the name.
-  __ CmpP(name, MemOperand(base_addr, 0));
-  __ bne(&miss, Label::kNear);
-
-  // Check the map matches.
-  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ CmpP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ bne(&miss, Label::kNear);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ b(&miss, Label::kNear);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ b(&miss, Label::kNear);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  // TODO(joransiu): Combine into indirect branch
-  __ la(code, MemOperand(code, Code::kHeaderSize - kHeapObjectTag));
-  __ b(code);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-#if V8_TARGET_ARCH_S390X
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 24.
-  DCHECK(sizeof(Entry) == 24);
-#else
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  DCHECK(sizeof(Entry) == 12);
-#endif
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Check scratch, extra and extra2 registers are valid.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(!extra.is(no_reg));
-  DCHECK(!extra2.is(no_reg));
-  DCHECK(!extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
-  // extra3 don't conflict with the vector and slot registers, which need
-  // to be preserved for a handler call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    Register vector, slot;
-    if (ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC) {
-      vector = StoreWithVectorDescriptor::VectorRegister();
-      slot = StoreWithVectorDescriptor::SlotRegister();
-    } else {
-      DCHECK(ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC);
-      vector = LoadWithVectorDescriptor::VectorRegister();
-      slot = LoadWithVectorDescriptor::SlotRegister();
-    }
-    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
-                      extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ LoadlW(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ AddP(scratch, scratch, ip);
-  __ XorP(scratch, scratch, Operand(kPrimaryMagic));
-  // The mask omits the last two bits because they are not part of the hash.
-  __ AndP(scratch, scratch,
-          Operand((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ SubP(scratch, scratch, name);
-  __ AddP(scratch, scratch, Operand(kSecondaryMagic));
-  __ AndP(scratch, scratch,
-          Operand((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch, extra, extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
-                      extra3);
-}
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_S390
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
index 84dbf48..5fc8cc3 100644
--- a/src/ic/stub-cache.cc
+++ b/src/ic/stub-cache.cc
@@ -6,6 +6,8 @@
 
 #include "src/ast/ast.h"
 #include "src/base/bits.h"
+#include "src/counters.h"
+#include "src/heap/heap.h"
 #include "src/ic/ic-inl.h"
 #include "src/type-info.h"
 
@@ -99,12 +101,12 @@
   Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
   for (int i = 0; i < kPrimaryTableSize; i++) {
     primary_[i].key = isolate()->heap()->empty_string();
-    primary_[i].map = NULL;
+    primary_[i].map = nullptr;
     primary_[i].value = empty;
   }
   for (int j = 0; j < kSecondaryTableSize; j++) {
     secondary_[j].key = isolate()->heap()->empty_string();
-    secondary_[j].map = NULL;
+    secondary_[j].map = nullptr;
     secondary_[j].value = empty;
   }
 }
@@ -116,9 +118,9 @@
   for (int i = 0; i < kPrimaryTableSize; i++) {
     if (primary_[i].key == *name) {
       Map* map = primary_[i].map;
-      // Map can be NULL, if the stub is constant function call
+      // Map can be nullptr, if the stub is constant function call
       // with a primitive receiver.
-      if (map == NULL) continue;
+      if (map == nullptr) continue;
 
       int offset = PrimaryOffset(*name, map);
       if (entry(primary_, offset) == &primary_[i] &&
@@ -131,9 +133,9 @@
   for (int i = 0; i < kSecondaryTableSize; i++) {
     if (secondary_[i].key == *name) {
       Map* map = secondary_[i].map;
-      // Map can be NULL, if the stub is constant function call
+      // Map can be nullptr, if the stub is constant function call
       // with a primitive receiver.
-      if (map == NULL) continue;
+      if (map == nullptr) continue;
 
       // Lookup in primary table and skip duplicates.
       int primary_offset = PrimaryOffset(*name, map);
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
index bdd7f4a..4054b32 100644
--- a/src/ic/stub-cache.h
+++ b/src/ic/stub-cache.h
@@ -48,13 +48,6 @@
   // Collect all maps that match the name.
   void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
                            Handle<Context> native_context, Zone* zone);
-  // Generate code for probing the stub cache table.
-  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
-  // registers. Set to no_reg if not needed.
-  // If leave_frame is true, then exit a frame before the tail call.
-  void GenerateProbe(MacroAssembler* masm, Register receiver, Register name,
-                     Register scratch, Register extra, Register extra2 = no_reg,
-                     Register extra3 = no_reg);
 
   enum Table { kPrimary, kSecondary };
 
@@ -81,7 +74,7 @@
         return StubCache::secondary_;
     }
     UNREACHABLE();
-    return NULL;
+    return nullptr;
   }
 
   Isolate* isolate() { return isolate_; }
@@ -99,7 +92,7 @@
 
   // Some magic number used in primary and secondary hash computations.
   static const int kPrimaryMagic = 0x3d532433;
-  static const int kSecondaryMagic = 0xb16b00b5;
+  static const int kSecondaryMagic = 0xb16ca6e5;
 
   static int PrimaryOffsetForTesting(Name* name, Map* map) {
     return PrimaryOffset(name, map);
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
index 9e95b95..4bbbba5 100644
--- a/src/ic/x64/access-compiler-x64.cc
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -5,6 +5,7 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/ic/access-compiler.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
index 36acccc..425ed47 100644
--- a/src/ic/x64/handler-compiler-x64.cc
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -44,16 +44,6 @@
   __ addp(rsp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ Push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ Pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -93,30 +83,12 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadNativeContextSlot(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ movp(result,
-          FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register result, Register scratch,
-    Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, result, miss_label);
-  if (!result.is(rax)) __ movp(rax, result);
-  __ ret(0);
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -124,15 +96,7 @@
   __ Push(name);
   __ Push(receiver);
   __ Push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -348,59 +312,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ Move(this->name(), name);
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ movl(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ andl(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ movp(scratch,
-          FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmpp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    Label do_store;
-    __ movp(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -533,13 +444,6 @@
   }
 }
 
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(rax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -606,10 +510,26 @@
   // Call the runtime system to load the interceptor.
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
-  __ PopReturnAddressTo(scratch2());
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ PushReturnAddressFrom(scratch2());
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ Push(receiver());
+  __ Push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ Push(slot());
+    __ Push(vector());
+  } else {
+    __ Push(scratch3());  // slot
+    __ Push(scratch2());  // vector
+  }
+  __ Push(Operand(rsp, 4 * kPointerSize));  // return address
+  __ movp(Operand(rsp, 5 * kPointerSize), name());
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc
deleted file mode 100644
index 9d73433..0000000
--- a/src/ic/x64/ic-compiler-x64.cc
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  // Return address is on the stack.
-  DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
-         !rbx.is(StoreDescriptor::NameRegister()) &&
-         !rbx.is(StoreDescriptor::ValueRegister()));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(StoreDescriptor::ReceiverRegister());
-  __ Push(StoreDescriptor::NameRegister());
-  __ Push(StoreDescriptor::ValueRegister());
-  __ Push(Smi::FromInt(language_mode));
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index a916e22..3b87bc9 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -6,530 +6,12 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
+#include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // r0   - used to hold the capacity of the property dictionary.
-  //
-  // r1   - used to hold the index into the property dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r1 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Test(Operand(elements, r1, times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ movp(result, Operand(elements, r1, times_pointer_size,
-                          kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register scratch0,
-                                    Register scratch1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // scratch0 - used during the positive dictionary lookup and is clobbered.
-  //
-  // scratch1 - used for index into the property dictionary and is clobbered.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(
-      masm, miss_label, &done, elements, name, scratch0, scratch1);
-
-  // If probing finds an entry in the dictionary, scratch0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ Test(Operand(elements, scratch1, times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
-                            kValueOffset - kHeapObjectTag));
-  __ movp(Operand(scratch1, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ movp(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-  DCHECK(value.is(rax));
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  // rbx: receiver's elements array (a FixedArray)
-  // receiver is a JSArray.
-  // r9: map of receiver
-  if (check_map == kCheckMap) {
-    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ movp(kScratchRegister,
-          FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
-  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
-          value);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Writing a non-smi, check whether array allows non-smi elements.
-  // r9: receiver's map
-  __ CheckFastObjectElements(r9, &transition_smi_elements);
-
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
-          value);
-  __ movp(rdx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(0);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    // rdi: elements array's map
-    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-    __ j(not_equal, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, rbx, key, kScratchDoubleReg,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(key, 1));
-    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
-  }
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
-                                         rdi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         rbx, rdi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, rbx, mode, slow);
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // Return address is on the stack.
-  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow_with_tagged_index);
-  // Get the map from the receiver.
-  __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow_with_tagged_index);
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ SmiToInteger32(key, key);
-
-  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(r9, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds.
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
-  // rbx: FixedArray
-  __ j(above, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  __ Integer32ToSmi(key, key);
-  __ bind(&slow_with_tagged_index);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ movp(r9, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzxbp(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
-
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ Move(vector, dummy_vector);
-  __ Move(slot, Smi::FromInt(slot_index));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r9,
-                                                     no_reg);
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // rbx: receiver's elements array (a FixedArray)
-  // flags: smicompare (receiver.length(), rbx)
-  __ j(not_equal, &slow);  // do not leave holes in the array
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
-  __ j(below_equal, &slow);
-  // Increment index to get new length.
-  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  // rdi: elements array's map
-  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
-  __ j(below_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = rax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ movp(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                   JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), rbx, rdi, rax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  LoadIC::GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
-         !rdi.is(vector));
-
-  __ PopReturnAddressTo(rdi);
-  __ Push(receiver);
-  __ Push(name);
-  __ Push(slot);
-  __ Push(vector);
-  __ PushReturnAddressFrom(rdi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  DCHECK(!rbx.is(receiver) && !rbx.is(name));
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-  Register value = StoreWithVectorDescriptor::ValueRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register temp = r11;
-  DCHECK(!AreAliased(receiver, name, value, slot, vector, temp));
-
-  __ PopReturnAddressTo(temp);
-  __ Push(value);
-  __ Push(slot);
-  __ Push(vector);
-  __ Push(receiver);
-  __ Push(name);
-  __ PushReturnAddressFrom(temp);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register name = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = r11;
-  DCHECK(!AreAliased(dictionary, StoreWithVectorDescriptor::VectorRegister(),
-                     StoreWithVectorDescriptor::SlotRegister()));
-
-  Label miss;
-
-  __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(0);
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -580,9 +62,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
deleted file mode 100644
index 946aee5..0000000
--- a/src/ic/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register receiver, Register name,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset) {
-  // We need to scale up the pointer by 2 when the offset is scaled by less
-  // than the pointer size.
-  DCHECK(kPointerSize == kInt64Size
-             ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
-             : kPointerSizeLog2 == StubCache::kCacheIndexShift);
-  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
-  DCHECK_EQ(3u * kPointerSize, sizeof(StubCache::Entry));
-  // The offset register holds the entry offset times four (due to masking
-  // and shifting optimizations).
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  Label miss;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ leap(offset, Operand(offset, offset, times_2, 0));
-
-  __ LoadAddress(kScratchRegister, key_offset);
-
-  // Check that the key in the entry matches the name.
-  __ cmpp(name, Operand(kScratchRegister, offset, scale_factor, 0));
-  __ j(not_equal, &miss);
-
-  // Get the map entry from the cache.
-  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
-  DCHECK(stub_cache->map_reference(table).address() -
-             stub_cache->key_reference(table).address() ==
-         kPointerSize * 2);
-  __ movp(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
-  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ j(not_equal, &miss);
-
-  // Get the code entry from the cache.
-  __ LoadAddress(kScratchRegister, value_offset);
-  __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ jmp(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ jmp(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(kScratchRegister);
-
-  __ bind(&miss);
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-  USE(extra);   // The register extra is not used on the X64 platform.
-  USE(extra2);  // The register extra2 is not used on the X64 platform.
-  USE(extra3);  // The register extra2 is not used on the X64 platform.
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 3 * kPointerSize.
-  DCHECK(sizeof(Entry) == 3 * kPointerSize);
-
-  // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-
-  // Check scratch register is valid, extra and extra2 are unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-#ifdef DEBUG
-  // If vector-based ics are in use, ensure that scratch doesn't conflict with
-  // the vector and slot registers, which need to be preserved for a handler
-  // call or miss.
-  if (IC::ICUseVector(ic_kind_)) {
-    if (ic_kind_ == Code::LOAD_IC || ic_kind_ == Code::KEYED_LOAD_IC) {
-      Register vector = LoadWithVectorDescriptor::VectorRegister();
-      Register slot = LoadDescriptor::SlotRegister();
-      DCHECK(!AreAliased(vector, slot, scratch));
-    } else {
-      DCHECK(ic_kind_ == Code::STORE_IC || ic_kind_ == Code::KEYED_STORE_IC);
-      Register vector = StoreWithVectorDescriptor::VectorRegister();
-      Register slot = StoreWithVectorDescriptor::SlotRegister();
-      DCHECK(!AreAliased(vector, slot, scratch));
-    }
-  }
-#endif
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  // Use only the low 32 bits of the map pointer.
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(kPrimaryMagic));
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, receiver, name, scratch);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(kPrimaryMagic));
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
-  __ subl(scratch, name);
-  __ addl(scratch, Immediate(kSecondaryMagic));
-  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, receiver, name, scratch);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x87/OWNERS b/src/ic/x87/OWNERS
index dd9998b..61245ae 100644
--- a/src/ic/x87/OWNERS
+++ b/src/ic/x87/OWNERS
@@ -1 +1,2 @@
 weiliang.lin@intel.com
+chunyang.dai@intel.com
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index a5c32d3..5a61eee 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -83,16 +83,6 @@
   __ add(esp, Immediate(2 * kPointerSize));
 }
 
-void PropertyHandlerCompiler::PushReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ push(tmp);
-}
-
-void PropertyHandlerCompiler::PopReturnAddress(Register tmp) {
-  MacroAssembler* masm = this->masm();
-  __ pop(tmp);
-}
-
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -132,27 +122,6 @@
   __ DecrementCounter(counters->negative_lookups_miss(), 1);
 }
 
-
-void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register result, Label* miss) {
-  __ LoadGlobalFunction(index, result);
-  // Load its initial map. The global functions all have initial maps.
-  __ mov(result,
-         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-}
-
-
-void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
-    MacroAssembler* masm, Register receiver, Register scratch1,
-    Register scratch2, Label* miss_label) {
-  // TODO(mvstanton): This isn't used on ia32. Move all the other
-  // platform implementations into a code stub so this method can be removed.
-  UNREACHABLE();
-}
-
-
 // Generate call to api function.
 // This function uses push() to generate smaller, faster code than
 // the version above. It is an optimization that should will be removed
@@ -324,10 +293,12 @@
   }
 }
 
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
+  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
+         Runtime::FunctionForId(id)->nargs);
 
-static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
-                                     Register holder, Register name,
-                                     Handle<JSObject> holder_obj) {
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
   STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
@@ -335,15 +306,7 @@
   __ push(name);
   __ push(receiver);
   __ push(holder);
-}
 
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm, Register receiver, Register holder, Register name,
-    Handle<JSObject> holder_obj, Runtime::FunctionId id) {
-  DCHECK(NamedLoadHandlerCompiler::kInterceptorArgsLength ==
-         Runtime::FunctionForId(id)->nargs);
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
   __ CallRuntime(id);
 }
 
@@ -359,58 +322,6 @@
   }
 }
 
-
-void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
-  __ mov(this->name(), Immediate(name));
-}
-
-
-void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
-                                                   Register map_reg,
-                                                   Register scratch,
-                                                   Label* miss) {
-  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
-  DCHECK(!map_reg.is(scratch));
-  __ LoadWeakValue(map_reg, cell, miss);
-  if (transition->CanBeDeprecated()) {
-    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
-    __ and_(scratch, Immediate(Map::Deprecated::kMask));
-    __ j(not_zero, miss);
-  }
-}
-
-
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
-                                                      int descriptor,
-                                                      Register value_reg,
-                                                      Register scratch,
-                                                      Label* miss_label) {
-  DCHECK(!map_reg.is(scratch));
-  DCHECK(!map_reg.is(value_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ LoadInstanceDescriptors(map_reg, scratch);
-  __ mov(scratch,
-         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
-  __ cmp(value_reg, scratch);
-  __ j(not_equal, miss_label);
-}
-
-void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(FieldType* field_type,
-                                                        Register value_reg,
-                                                        Label* miss_label) {
-  Register map_reg = scratch1();
-  Register scratch = scratch2();
-  DCHECK(!value_reg.is(map_reg));
-  DCHECK(!value_reg.is(scratch));
-  __ JumpIfSmi(value_reg, miss_label);
-  if (field_type->IsClass()) {
-    __ mov(map_reg, FieldOperand(value_reg, HeapObject::kMapOffset));
-    __ CmpWeakValue(map_reg, Map::WeakCellForMap(field_type->AsClass()),
-                    scratch);
-    __ j(not_equal, miss_label);
-  }
-}
-
 void PropertyHandlerCompiler::GenerateAccessCheck(
     Handle<WeakCell> native_context_cell, Register scratch1, Register scratch2,
     Label* miss, bool compare_native_contexts_only) {
@@ -540,14 +451,6 @@
   }
 }
 
-
-void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
 void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     LookupIterator* it, Register holder_reg) {
   DCHECK(holder()->HasNamedInterceptor());
@@ -620,10 +523,26 @@
   DCHECK(holder()->HasNamedInterceptor());
   DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined(isolate()));
   // Call the runtime system to load the interceptor.
-  __ pop(scratch2());  // save old return address
-  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
-                           holder());
-  __ push(scratch2());  // restore old return address
+
+  // Stack:
+  //   return address
+
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 3);
+  __ push(receiver());
+  __ push(holder_reg);
+  // See NamedLoadHandlerCompiler::InterceptorVectorSlotPop() for details.
+  if (holder_reg.is(receiver())) {
+    __ push(slot());
+    __ push(vector());
+  } else {
+    __ push(scratch3());  // slot
+    __ push(scratch2());  // vector
+  }
+  __ push(Operand(esp, 4 * kPointerSize));  // return address
+  __ mov(Operand(esp, 5 * kPointerSize), name());
 
   __ TailCallRuntime(Runtime::kLoadPropertyWithInterceptor);
 }
diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc
deleted file mode 100644
index 11a8cdc..0000000
--- a/src/ic/x87/ic-compiler-x87.cc
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-void PropertyICCompiler::GenerateRuntimeSetProperty(
-    MacroAssembler* masm, LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  STATIC_ASSERT(Descriptor::kStackArgumentsCount == 3);
-  // ----------- S t a t e -------------
-  //  -- esp[12] : value
-  //  -- esp[8]  : slot
-  //  -- esp[4]  : vector
-  //  -- esp[0]  : return address
-  // -----------------------------------
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  __ mov(Operand(esp, 12), Descriptor::ReceiverRegister());
-  __ mov(Operand(esp, 8), Descriptor::NameRegister());
-  __ mov(Operand(esp, 4), Descriptor::ValueRegister());
-  __ pop(ebx);
-  __ push(Immediate(Smi::FromInt(language_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index f96e509..7564c00 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -6,532 +6,11 @@
 
 #include "src/codegen.h"
 #include "src/ic/ic.h"
-#include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
-                                   Register elements, Register name,
-                                   Register r0, Register r1, Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property eventhough it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
-                                    Register elements, Register name,
-                                    Register value, Register r0, Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
-                                                   elements, name, r0, r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-  DCHECK(value.is(eax));
-  // key is a smi.
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store, could either Object or double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, key),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, key), value);
-  // Update write barrier for the elements array address.
-  __ mov(edx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, ebx, key, edi,
-                                 &transition_double_elements, false);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(StoreWithVectorDescriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
-              &non_double_value, DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
-                                         edi, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         ebx, edi, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
-                                                      value, ebx, mode, slow);
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  // Return address is on the stack.
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register key = Descriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            Immediate(1 << Map::kIsAccessCheckNeeded));
-  __ j(not_zero, &slow);
-
-  __ LoadParameterFromStack<Descriptor>(Descriptor::ValueRegister(),
-                                        Descriptor::kValue);
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JS object EXCEPT JS Value type. In
-  // the case that the object is a value-wrapper object, we enter the runtime
-  // system to make sure that indexing into string objects works as intended.
-  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
-  __ CmpInstanceType(edi, JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // Key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ mov(ebx, FieldOperand(key, HeapObject::kMapOffset));
-  __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, edi,
-                                                     no_reg);
-
-  // Cache miss.
-  __ jmp(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // receiver is a JSArray.
-  // key is a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (key, receiver.length())
-  // do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  // receiver is a JSArray.
-  // key is a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object, &fast_double, &slow,
-                                      kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  Register dictionary = eax;
-  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
-  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
-
-  Label slow;
-
-  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
-                                  JSObject::kPropertiesOffset));
-  GenerateDictionaryLoad(masm, &slow, dictionary,
-                         LoadDescriptor::NameRegister(), edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-}
-
-
-static void LoadIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-
-  Register slot = LoadDescriptor::SlotRegister();
-  Register vector = LoadWithVectorDescriptor::VectorRegister();
-  DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
-         !edi.is(vector));
-
-  __ pop(edi);
-  __ push(receiver);
-  __ push(name);
-  __ push(slot);
-  __ push(vector);
-  __ push(edi);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_load_miss(), 1);
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kLoadIC_Miss);
-}
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kGetProperty);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_miss(), 1);
-
-  LoadIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
-}
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // Return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
-
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty);
-}
-
-static void StoreIC_PushArgs(MacroAssembler* masm) {
-  Register receiver = StoreWithVectorDescriptor::ReceiverRegister();
-  Register name = StoreWithVectorDescriptor::NameRegister();
-
-  STATIC_ASSERT(StoreWithVectorDescriptor::kStackArgumentsCount == 3);
-  // Current stack layout:
-  // - esp[12]   -- value
-  // - esp[8]    -- slot
-  // - esp[4]    -- vector
-  // - esp[0]    -- return address
-
-  Register return_address = StoreWithVectorDescriptor::SlotRegister();
-  __ pop(return_address);
-  __ push(receiver);
-  __ push(name);
-  __ push(return_address);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kStoreIC_Miss);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  typedef StoreWithVectorDescriptor Descriptor;
-  Label restore_miss;
-  Register receiver = Descriptor::ReceiverRegister();
-  Register name = Descriptor::NameRegister();
-  Register value = Descriptor::ValueRegister();
-  // Since the slot and vector values are passed on the stack we can use
-  // respective registers as scratch registers.
-  Register scratch1 = Descriptor::VectorRegister();
-  Register scratch2 = Descriptor::SlotRegister();
-
-  __ LoadParameterFromStack<Descriptor>(value, Descriptor::kValue);
-
-  // A lot of registers are needed for storing to slow case objects.
-  // Push and restore receiver but rely on GenerateDictionaryStore preserving
-  // the value and name.
-  __ push(receiver);
-
-  Register dictionary = receiver;
-  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
-                          scratch1, scratch2);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->ic_store_normal_hit(), 1);
-  __ ret(Descriptor::kStackArgumentsCount * kPointerSize);
-
-  __ bind(&restore_miss);
-  __ pop(receiver);
-  __ IncrementCounter(counters->ic_store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
-}
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // Return address is on the stack.
-  StoreIC_PushArgs(masm);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
-}
-
-#undef __
-
 
 Condition CompareIC::ComputeCondition(Token::Value op) {
   switch (op) {
@@ -582,9 +61,7 @@
   // condition code uses at the patched jump.
   uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
   if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           static_cast<void*>(address),
-           static_cast<void*>(test_instruction_address), delta);
+    LOG(isolate, PatchIC(address, test_instruction_address, delta));
   }
 
   // Patch with a short conditional jump. Enabling means switching from a short
diff --git a/src/ic/x87/stub-cache-x87.cc b/src/ic/x87/stub-cache-x87.cc
deleted file mode 100644
index 68fa615..0000000
--- a/src/ic/x87/stub-cache-x87.cc
+++ /dev/null
@@ -1,185 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic/ic.h"
-#include "src/ic/stub-cache.h"
-#include "src/interface-descriptors.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-static void ProbeTable(StubCache* stub_cache, MacroAssembler* masm,
-                       StubCache::Table table, Register name, Register receiver,
-                       // The offset is scaled by 4, based on
-                       // kCacheIndexShift, which is two bits
-                       Register offset, Register extra) {
-  ExternalReference key_offset(stub_cache->key_reference(table));
-  ExternalReference value_offset(stub_cache->value_reference(table));
-  ExternalReference map_offset(stub_cache->map_reference(table));
-
-  Label miss;
-  Code::Kind ic_kind = stub_cache->ic_kind();
-  bool is_vector_store =
-      IC::ICUseVector(ic_kind) &&
-      (ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    if (is_vector_store) {
-      // The value, vector and slot were passed to the IC on the stack and
-      // they are still there. So we can just jump to the handler.
-      DCHECK(extra.is(StoreWithVectorDescriptor::SlotRegister()));
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    } else {
-      // The vector and slot were pushed onto the stack before starting the
-      // probe, and need to be dropped before calling the handler.
-      __ pop(LoadWithVectorDescriptor::VectorRegister());
-      __ pop(LoadDescriptor::SlotRegister());
-      __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-      __ jmp(extra);
-    }
-
-    __ bind(&miss);
-  } else {
-    DCHECK(ic_kind == Code::STORE_IC || ic_kind == Code::KEYED_STORE_IC);
-
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    if (is_vector_store) {
-      DCHECK(offset.is(StoreWithVectorDescriptor::SlotRegister()));
-    }
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
-
-void StubCache::GenerateProbe(MacroAssembler* masm, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  DCHECK(sizeof(Entry) == 12);
-
-  // Assert that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  DCHECK(!scratch.is(no_reg));
-  DCHECK(extra2.is(no_reg));
-  DCHECK(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  DCHECK(kCacheIndexShift == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(this, masm, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, kPrimaryMagic);
-  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
-  __ sub(offset, name);
-  __ add(offset, Immediate(kSecondaryMagic));
-  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
-
-  // Probe the secondary table.
-  ProbeTable(this, masm, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-#undef __
-}  // namespace internal
-}  // namespace v8
-
-#endif  // V8_TARGET_ARCH_X87