Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 7c8a5ea..a27d6b5 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -2,9 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-
-#include "src/v8.h"
-
 #if V8_TARGET_ARCH_MIPS
 
 #include "src/codegen.h"
@@ -28,7 +25,6 @@
   // Register usage:
   //   type: holds the receiver instance type on entry.
   __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
   __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
 }
 
@@ -160,12 +156,11 @@
 
 
 // Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                   Register key, Register elements,
                                   Register scratch1, Register scratch2,
-                                  Register result, Label* not_fast_array,
-                                  Label* out_of_range) {
+                                  Register result, Label* slow,
+                                  LanguageMode language_mode) {
   // Register use:
   //
   // receiver - holds the receiver on entry.
@@ -174,8 +169,6 @@
   // key      - holds the smi key on entry.
   //            Unchanged unless 'result' is the same register.
   //
-  // elements - holds the elements of the receiver on exit.
-  //
   // result   - holds the result on exit if the load succeeded.
   //            Allowed to be the same as 'receiver' or 'key'.
   //            Unchanged on bailout so 'receiver' and 'key' can be safely
@@ -183,25 +176,55 @@
   //
   // Scratch registers:
   //
-  // scratch1 - used to hold elements map and elements length.
-  //            Holds the elements map if not_fast_array branch is taken.
+  // elements - holds the elements of the receiver and its prototypes.
   //
-  // scratch2 - used to hold the loaded value.
+  // scratch1 - used to hold elements length, bit fields, base addresses.
+  //
+  // scratch2 - used to hold maps, prototypes, and the loaded value.
+  Label check_prototypes, check_next_prototype;
+  Label done, in_bounds, absent;
 
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode (not dictionary).
-    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-    __ Branch(not_fast_array, ne, scratch1, Operand(at));
-  } else {
-    __ AssertFastElements(elements);
-  }
+  __ AssertFastElements(elements);
 
   // Check that the key (index) is within bounds.
   __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(out_of_range, hs, key, Operand(scratch1));
+  __ Branch(&in_bounds, lo, key, Operand(scratch1));
+  // Out-of-bounds. Check the prototype chain to see if we can just return
+  // 'undefined'.
+  // Negative keys can't take the fast OOB path.
+  __ Branch(slow, lt, key, Operand(zero_reg));
+  __ bind(&check_prototypes);
+  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ bind(&check_next_prototype);
+  __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
+  // scratch2: current prototype
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  __ Branch(&absent, eq, scratch2, Operand(at));
+  __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
+  __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+  // elements: elements of current prototype
+  // scratch2: map of current prototype
+  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
+  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
+  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
+                               (1 << Map::kHasIndexedInterceptor)));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(slow, ne, elements, Operand(at));
+  __ Branch(&check_next_prototype);
 
+  __ bind(&absent);
+  if (is_strong(language_mode)) {
+    // Strong mode accesses must throw in this case, so call the runtime.
+    __ Branch(slow);
+  } else {
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ Branch(&done);
+  }
+
+  __ bind(&in_bounds);
   // Fast case: Do the load.
   __ Addu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -212,10 +235,10 @@
   __ lw(scratch2, MemOperand(at));
 
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ Branch(out_of_range, eq, scratch2, Operand(at));
-  __ mov(result, scratch2);
+  // In case the loaded value is the_hole we have to check the prototype chain.
+  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
+  __ Move(result, scratch2);
+  __ bind(&done);
 }
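
Note: the rewritten GenerateFastArrayLoad above no longer bails to the runtime for every out-of-bounds key; it walks the prototype chain and returns undefined when that is provably safe. A minimal C++ sketch of the decision logic, using hypothetical plain structs rather than V8's actual object model:

```cpp
#include <optional>
#include <vector>

// Hypothetical model of the fast out-of-bounds load path; field names are
// illustrative stand-ins for the map bits tested in the assembly above.
struct ProtoObject {
  const ProtoObject* prototype;  // nullptr models the null prototype
  std::vector<int> elements;     // fast elements backing store
  bool access_check_needed;      // Map::kIsAccessCheckNeeded
  bool has_indexed_interceptor;  // Map::kHasIndexedInterceptor
};

// Returns the element, or nullopt for 'undefined'; *take_slow_path is set
// when the stub would branch to the 'slow' label instead.
std::optional<int> FastKeyedLoad(const ProtoObject& receiver, int key,
                                 bool* take_slow_path) {
  *take_slow_path = false;
  if (key < 0) {  // negative keys can't take the fast OOB path
    *take_slow_path = true;
    return std::nullopt;
  }
  if (key < static_cast<int>(receiver.elements.size()))
    return receiver.elements[key];  // in bounds: plain fast load
  // Out of bounds: walk the prototype chain, as check_next_prototype does.
  for (const ProtoObject* p = receiver.prototype; p != nullptr;
       p = p->prototype) {
    // Any prototype needing access checks, carrying an indexed interceptor,
    // or owning non-empty elements forces the generic slow path.
    if (p->access_check_needed || p->has_indexed_interceptor ||
        !p->elements.empty()) {
      *take_slow_path = true;
      return std::nullopt;
    }
  }
  return std::nullopt;  // reached null: the load yields undefined
}
```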
 
 
@@ -249,7 +272,7 @@
 }
 
 
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
+void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
   Register dictionary = a0;
   DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
@@ -264,7 +287,7 @@
 
   // Dictionary load failed, go slow (but don't miss).
   __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
+  GenerateRuntimeGetProperty(masm, language_mode);
 }
 
 
@@ -275,14 +298,10 @@
 static void LoadIC_PushArgs(MacroAssembler* masm) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  if (FLAG_vector_ics) {
-    Register slot = VectorLoadICDescriptor::SlotRegister();
-    Register vector = VectorLoadICDescriptor::VectorRegister();
+  Register slot = LoadDescriptor::SlotRegister();
+  Register vector = LoadWithVectorDescriptor::VectorRegister();
 
-    __ Push(receiver, name, slot, vector);
-  } else {
-    __ Push(receiver, name);
-  }
+  __ Push(receiver, name, slot, vector);
 }
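
Note: with FLAG_vector_ics removed, every load site unconditionally passes a feedback slot and vector, so the miss handlers below always take four arguments. An illustrative model (names are stand-ins, not V8's API) of what the two extra arguments refer to:

```cpp
#include <cstddef>
#include <vector>

// Stand-in for the per-entry IC feedback (cached maps, handlers, ...).
struct FeedbackEntry {};

// Stand-in for TypeFeedbackVector: one entry per IC site in a function.
// The smi 'slot' pushed by LoadIC_PushArgs indexes into it, so the runtime
// can update exactly the feedback belonging to this access site.
struct TypeFeedbackVectorModel {
  std::vector<FeedbackEntry> entries;
  FeedbackEntry& ForSlot(std::size_t slot) { return entries.at(slot); }
};
```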
 
 
@@ -290,138 +309,27 @@
   // The return address is in ra.
   Isolate* isolate = masm->isolate();
 
-  DCHECK(!FLAG_vector_ics ||
-         !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
-                     VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
+                     LoadWithVectorDescriptor::VectorRegister()));
   __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
 
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
-  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  int arg_count = FLAG_vector_ics ? 4 : 2;
-  __ TailCallExternalReference(ref, arg_count, 1);
+  __ TailCallRuntime(Runtime::kLoadIC_Miss);
 }
 
 
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+                                        LanguageMode language_mode) {
   // The return address is in ra.
 
   __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
   __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
 
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(
-    MacroAssembler* masm, Register object, Register key, Register scratch1,
-    Register scratch2, Register scratch3, Label* unmapped_case,
-    Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
-  // Check that the receiver is a JSObject. Because of the map check
-  // later, we do not need to check for interceptors or whether it
-  // requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ GetObjectType(object, scratch1, scratch2);
-  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
-
-  // Check that the key is a positive smi.
-  __ And(scratch1, key, Operand(0x80000001));
-  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
-
-  // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
-  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
-  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
-
-  // Load element index and check whether it is the hole.
-  const int kOffset =
-      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
-  __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, key, scratch3);
-  __ Addu(scratch3, scratch3, Operand(kOffset));
-
-  __ Addu(scratch2, scratch1, scratch3);
-  __ lw(scratch2, MemOperand(scratch2));
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-  __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, scratch2, scratch3);
-  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch2, scratch1, scratch3);
-  return MemOperand(scratch2);
-}
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                                  Register key,
-                                                  Register parameter_map,
-                                                  Register scratch,
-                                                  Label* slow_case) {
-  // Element is in arguments backing store, which is referenced by the
-  // second element of the parameter_map. The parameter_map register
-  // must be loaded with the parameter map of the arguments object and is
-  // overwritten.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
-              DONT_DO_SMI_CHECK);
-  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
-  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
-  __ li(scratch, Operand(kPointerSize >> 1));
-  __ Mul(scratch, key, scratch);
-  __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch, backing_store, scratch);
-  return MemOperand(scratch);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register value = StoreDescriptor::ValueRegister();
-  DCHECK(value.is(a0));
-
-  Label slow, notin;
-  // Store address is returned in register (of MemOperand) mapped_location.
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, a3, t0, t1, &notin, &slow);
-  __ sw(value, mapped_location);
-  __ mov(t5, value);
-  DCHECK_EQ(mapped_location.offset(), 0);
-  __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, value);  // (In delay slot) return the value stored in v0.
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in a3.
-  // Store address is returned in register (of MemOperand) unmapped_location.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
-  __ sw(value, unmapped_location);
-  __ mov(t5, value);
-  DCHECK_EQ(unmapped_location.offset(), 0);
-  __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
-  __ bind(&slow);
-  GenerateMiss(masm);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
+                                              : Runtime::kGetProperty);
 }
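
Note: the LanguageMode parameter threaded through these helpers picks the runtime entry: strong-mode loads must throw on absent properties rather than produce undefined. A small sketch of the dispatch with stand-in enum and names (V8's real is_strong() and runtime ids are declared elsewhere):

```cpp
// Stand-in types; V8 defines LanguageMode and is_strong() in its headers.
enum class LanguageMode { kSloppy, kStrict, kStrong };

constexpr bool is_strong(LanguageMode mode) {
  return mode == LanguageMode::kStrong;
}

enum class RuntimeId { kGetProperty, kGetPropertyStrong };

// Mirrors the ternary in GenerateRuntimeGetProperty above: strong mode
// gets a dedicated runtime entry that throws when the property is absent.
constexpr RuntimeId RuntimeForLoad(LanguageMode mode) {
  return is_strong(mode) ? RuntimeId::kGetPropertyStrong
                         : RuntimeId::kGetProperty;
}
```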
 
 
@@ -429,32 +337,31 @@
   // The return address is in ra.
   Isolate* isolate = masm->isolate();
 
-  DCHECK(!FLAG_vector_ics ||
-         !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
-                     VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
+                     LoadWithVectorDescriptor::VectorRegister()));
   __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
 
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  int arg_count = FLAG_vector_ics ? 4 : 2;
-  __ TailCallExternalReference(ref, arg_count, 1);
+  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
 }
 
 
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
+                                             LanguageMode language_mode) {
   // The return address is in ra.
 
   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
 
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
+                                              : Runtime::kKeyedGetProperty);
 }
 
 
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
+                                      LanguageMode language_mode) {
   // The return address is in ra.
   Label slow, check_name, index_smi, index_name, property_array_property;
   Label probe_dictionary, check_number_dictionary;
@@ -478,7 +385,8 @@
   // Check the receiver's map to see if it has fast elements.
   __ CheckFastElements(a0, a3, &check_number_dictionary);
 
-  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
+  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow,
+                        language_mode);
   __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
   __ Ret();
 
@@ -499,7 +407,7 @@
   __ bind(&slow);
   __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
                       a3);
-  GenerateRuntimeGetProperty(masm);
+  GenerateRuntimeGetProperty(masm, language_mode);
 
   __ bind(&check_name);
   GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
@@ -508,92 +416,31 @@
                                  Map::kHasNamedInterceptor, &slow);
 
 
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary.
+  // If the receiver is a fast-case object, check the stub cache. Otherwise
+  // probe the dictionary.
   __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
   __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
   __ LoadRoot(at, Heap::kHashTableMapRootIndex);
   __ Branch(&probe_dictionary, eq, t0, Operand(at));
 
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the name hash.
-  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
-  __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ sra(at, t0, Name::kHashShift);
-  __ xor_(a3, a3, at);
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(a3, a3, Operand(mask));
+  // The handlers in the stub cache expect a vector and slot. Since we won't
+  // change the IC from any downstream misses, a dummy vector can be used.
+  Register vector = LoadWithVectorDescriptor::VectorRegister();
+  Register slot = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
+  Handle<TypeFeedbackVector> dummy_vector =
+      TypeFeedbackVector::DummyVector(masm->isolate());
+  int slot_index = dummy_vector->GetIndex(
+      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
+  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+  __ li(slot, Operand(Smi::FromInt(slot_index)));
 
-  // Load the key (consisting of map and unique name) from the cache and
-  // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(isolate);
-  __ li(t0, Operand(cache_keys));
-  __ sll(at, a3, kPointerSizeLog2 + 1);
-  __ addu(t0, t0, at);
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
-    __ Branch(&try_next_entry, ne, a0, Operand(t1));
-    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
-    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
-    __ bind(&try_next_entry);
-  }
-
-  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
-  __ Branch(&slow, ne, a0, Operand(t1));
-  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
-  __ Branch(&slow, ne, key, Operand(t1));
-
-  // Get field offset.
-  // a0     : receiver's map
-  // a3     : lookup cache index
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    __ li(t0, Operand(cache_field_offsets));
-    __ sll(at, a3, kPointerSizeLog2);
-    __ addu(at, t0, at);
-    __ lw(t1, MemOperand(at, kPointerSize * i));
-    __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
-    __ Subu(t1, t1, t2);
-    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
-    if (i != 0) {
-      __ Branch(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
-  __ addu(t2, t2, t1);  // Index from start of object.
-  __ Subu(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
-  __ sll(at, t2, kPointerSizeLog2);
-  __ addu(at, receiver, at);
-  __ lw(v0, MemOperand(at));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
-                      t0, a3);
-  __ Ret();
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ sll(v0, t1, kPointerSizeLog2);
-  __ Addu(v0, v0, receiver);
-  __ lw(v0, MemOperand(v0));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
-                      t0, a3);
-  __ Ret();
-
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
+                                               receiver, key, t0, t1, t2, t5);
+  // Cache miss.
+  GenerateMiss(masm);
 
   // Do a quick inline probe of the receiver's dictionary, if it
   // exists.
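
Note: the deleted block probed the KeyedLookupCache with a hash mixed from the receiver's map word and the name's hash field; the replacement instead probes the generic stub cache under a dummy feedback vector. For reference, a sketch of the retired bucket hash (shift and mask constants are illustrative, not V8's):

```cpp
#include <cstdint>

// Illustrative constants; V8's live in KeyedLookupCache and Name.
constexpr int kMapHashShift = 5;
constexpr int kNameHashShift = 2;
constexpr uint32_t kCapacityMask = 0x3f;  // e.g. 64 buckets

// Mirrors the deleted sra/xor/And sequence: mix shifted map-pointer bits
// with the name's hash and mask down to a bucket index.
uint32_t BucketIndex(uintptr_t map_word, uint32_t name_hash_field) {
  uint32_t h = static_cast<uint32_t>(map_word >> kMapHashShift) ^
               (name_hash_field >> kNameHashShift);
  return h & kCapacityMask;
}
```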
@@ -626,8 +473,13 @@
 
   // Fast case: Do the store, could be either Object or double.
   __ bind(fast_object);
-  Register scratch_value = t0;
+  Register scratch = t0;
+  Register scratch2 = t4;
+  Register scratch3 = t5;
   Register address = t1;
+  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+                     scratch, scratch2, scratch3, address));
+
   if (check_map == kCheckMap) {
     __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
     __ Branch(fast_double, ne, elements_map,
@@ -641,11 +493,10 @@
   __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
   __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
   __ addu(address, address, at);
-  __ lw(scratch_value, MemOperand(address));
-  __ Branch(&holecheck_passed1, ne, scratch_value,
+  __ lw(scratch, MemOperand(address));
+  __ Branch(&holecheck_passed1, ne, scratch,
             Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
 
   __ bind(&holecheck_passed1);
 
@@ -655,35 +506,34 @@
 
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   // It's irrelevant whether array is smi-only or not when writing a smi.
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
+  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch);
   __ sw(value, MemOperand(address));
   __ Ret();
 
   __ bind(&non_smi_value);
   // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
+  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
 
   // Fast elements array, store the value to the elements backing store.
   __ bind(&finish_object_store);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
+  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch);
   __ sw(value, MemOperand(address));
   // Update write barrier for the elements array address.
-  __ mov(scratch_value, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+  __ mov(scratch, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                  kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ Ret();
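
Note: the length update above works directly on tagged values. On 32-bit targets a smi is the integer shifted left one bit with a zero tag bit, so Addu(scratch, key, Operand(Smi::FromInt(1))) adds a tagged 1 to the tagged key without untagging. A self-contained sketch of that identity:

```cpp
#include <cassert>
#include <cstdint>

// 32-bit smi encoding: integer in the upper 31 bits, tag bit 0 clear.
constexpr int kSmiTagSize = 1;

constexpr int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
constexpr int32_t SmiToInt(int32_t smi) { return smi >> kSmiTagSize; }

int main() {
  int32_t key = SmiFromInt(7);               // tagged index 7
  int32_t new_length = key + SmiFromInt(1);  // tagged addition, no untagging
  assert(SmiToInt(new_length) == 8);         // same as tagging 7 + 1
  return 0;
}
```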
 
@@ -702,34 +552,31 @@
                                      kHoleNanUpper32Offset - kHeapObjectTag));
   __ sll(at, key, kPointerSizeLog2);
   __ addu(address, address, at);
-  __ lw(scratch_value, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch_value,
+  __ lw(scratch, MemOperand(address));
+  __ Branch(&fast_double_without_map_check, ne, scratch,
             Operand(kHoleNanUpper32));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
 
   __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key,
-                                 elements,  // Overwritten.
-                                 a3,        // Scratch regs...
-                                 t0, t1, &transition_double_elements);
+  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
+                                 scratch3, &transition_double_elements);
   if (increment_length == kIncrementLength) {
     // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
   }
   __ Ret();
 
   __ bind(&transition_smi_elements);
   // Transition the array appropriately depending on the value type.
-  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, t0, Operand(at));
+  __ Branch(&non_double_value, ne, scratch, Operand(at));
 
   // Value is a double. Transition FAST_SMI_ELEMENTS ->
   // FAST_DOUBLE_ELEMENTS and complete the store.
   __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
   AllocationSiteMode mode =
       AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
   ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
@@ -740,7 +587,7 @@
   __ bind(&non_double_value);
   // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
   __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, t0, slow);
+                                         receiver_map, scratch, slow);
   mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
   ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
       masm, receiver, key, value, receiver_map, mode, slow);
@@ -752,7 +599,7 @@
   // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
   // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
   __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, t0, slow);
+                                         receiver_map, scratch, slow);
   mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
   ElementsTransitionGenerator::GenerateDoubleToObject(
       masm, receiver, key, value, receiver_map, mode, slow);
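
Note: the three transition paths implement a one-way generalization over elements kinds: smi-only arrays move to unboxed doubles or to generic elements depending on the stored value, and double arrays can only generalize to generic elements. A compact C++ summary (the enumerators mirror V8's names; the function itself is illustrative):

```cpp
enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

// Which kind the store stub transitions to when the incoming value does
// not fit the current backing store. Storing a smi never transitions.
ElementsKind TransitionFor(ElementsKind current, bool value_is_heap_number) {
  switch (current) {
    case FAST_SMI_ELEMENTS:
      // A HeapNumber keeps an unboxed double store; any other heap object
      // forces generic FAST_ELEMENTS (the &non_double_value path).
      return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
    case FAST_DOUBLE_ELEMENTS:
      // &transition_double_elements: box the doubles, go fully generic.
      return FAST_ELEMENTS;
    case FAST_ELEMENTS:
      return FAST_ELEMENTS;  // already the most general fast kind
  }
  return FAST_ELEMENTS;  // unreachable; keeps compilers happy
}
```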
@@ -762,7 +609,7 @@
 
 
 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       StrictMode strict_mode) {
+                                       LanguageMode language_mode) {
   // ---------- S t a t e --------------
   //  -- a0     : value
   //  -- a1     : key
@@ -798,8 +645,11 @@
   // Check if the object is a JS array or not.
   __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
   __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+  // Check that the object is some kind of JS object EXCEPT JS Value type. In
+  // the case that the object is a value-wrapper object, we enter the runtime
+  // system to make sure that indexing into string objects works as intended.
+  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
+  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));
 
   // Object case: Check key against length in the elements array.
   __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -813,17 +663,30 @@
   // a0: value.
   // a1: key.
   // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
   // Never returns to here.
 
   __ bind(&maybe_name_key);
   __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
   __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
   __ JumpIfNotUniqueNameInstanceType(t0, &slow);
+
+  // The handlers in the stub cache expect a vector and slot. Since we won't
+  // change the IC from any downstream misses, a dummy vector can be used.
+  Register vector = VectorStoreICDescriptor::VectorRegister();
+  Register slot = VectorStoreICDescriptor::SlotRegister();
+  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
+  Handle<TypeFeedbackVector> dummy_vector =
+      TypeFeedbackVector::DummyVector(masm->isolate());
+  int slot_index = dummy_vector->GetIndex(
+      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
+  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
+  __ li(slot, Operand(Smi::FromInt(slot_index)));
+
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+                                               receiver, key, t1, t2, t4, t5);
   // Cache miss.
   __ Branch(&miss);
 
@@ -871,14 +734,18 @@
 }
 
 
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // Push receiver, key and value for runtime call.
+static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
+          StoreDescriptor::ValueRegister(),
+          VectorStoreICDescriptor::SlotRegister(),
+          VectorStoreICDescriptor::VectorRegister());
+}
 
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  StoreIC_PushArgs(masm);
+
+  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
 }
 
 
@@ -892,8 +759,8 @@
   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);
+  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
+                                               receiver, name, a3, t0, t1, t2);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
@@ -901,12 +768,10 @@
 
 
 void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
-          StoreDescriptor::ValueRegister());
+  StoreIC_PushArgs(masm);
+
   // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
+  __ TailCallRuntime(Runtime::kStoreIC_Miss);
 }
 
 
@@ -915,20 +780,22 @@
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register name = StoreDescriptor::NameRegister();
   Register value = StoreDescriptor::ValueRegister();
-  Register dictionary = a3;
+  Register dictionary = t1;
   DCHECK(receiver.is(a1));
   DCHECK(name.is(a2));
   DCHECK(value.is(a0));
+  DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
+  DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));
 
   __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
 
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
   Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+  __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
   __ Ret();
 
   __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+  __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
   GenerateMiss(masm);
 }
 
@@ -969,7 +836,8 @@
 }
 
 
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+void PatchInlinedSmiCode(Isolate* isolate, Address address,
+                         InlinedSmiCheck check) {
   Address andi_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
 
@@ -999,8 +867,6 @@
   Address patch_address =
       andi_instruction_address - delta * Instruction::kInstrSize;
   Instr instr_at_patch = Assembler::instr_at(patch_address);
-  Instr branch_instr =
-      Assembler::instr_at(patch_address + Instruction::kInstrSize);
   // This is patching a conditional "jump if not smi/jump if smi" site.
   // Enabling by changing from
   //   andi at, rx, 0
@@ -1009,26 +875,57 @@
   //   andi at, rx, #kSmiTagMask
   //   Branch <target>, ne, at, Operand(zero_reg)
   // and vice-versa to be disabled again.
-  CodePatcher patcher(patch_address, 2);
+  CodePatcher patcher(isolate, patch_address, 2);
   Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
   if (check == ENABLE_INLINED_SMI_CHECK) {
     DCHECK(Assembler::IsAndImmediate(instr_at_patch));
-    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
     patcher.masm()->andi(at, reg, kSmiTagMask);
   } else {
-    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
     DCHECK(Assembler::IsAndImmediate(instr_at_patch));
     patcher.masm()->andi(at, reg, 0);
   }
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
   DCHECK(Assembler::IsBranch(branch_instr));
-  if (Assembler::IsBeq(branch_instr)) {
-    patcher.ChangeBranchCondition(ne);
-  } else {
-    DCHECK(Assembler::IsBne(branch_instr));
-    patcher.ChangeBranchCondition(eq);
+
+  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
+  // Currently only the 'eq' and 'ne' condition values are supported, via the
+  // simple branch instructions and their r6 variants (with the opcode
+  // encoding the branch type). There are some special cases (see
+  // Assembler::IsBranch()), so extending this would be tricky.
+  DCHECK(opcode == BEQ ||    // BEQ
+         opcode == BNE ||    // BNE
+         opcode == POP10 ||  // BEQC
+         opcode == POP30 ||  // BNEC
+         opcode == POP66 ||  // BEQZC
+         opcode == POP76);   // BNEZC
+  switch (opcode) {
+    case BEQ:
+      opcode = BNE;  // change BEQ to BNE.
+      break;
+    case POP10:
+      opcode = POP30;  // change BEQC to BNEC.
+      break;
+    case POP66:
+      opcode = POP76;  // change BEQZC to BNEZC.
+      break;
+    case BNE:
+      opcode = BEQ;  // change BNE to BEQ.
+      break;
+    case POP30:
+      opcode = POP10;  // change BNEC to BEQC.
+      break;
+    case POP76:
+      opcode = POP66;  // change BNEZC to BEQZC.
+      break;
+    default:
+      UNIMPLEMENTED();
   }
+  patcher.ChangeBranchCondition(branch_instr, opcode);
 }
-}
-}  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_TARGET_ARCH_MIPS
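
Note on the smi-check patching above: V8's 32-bit smi encoding keeps the low tag bit clear, so "andi at, rx, kSmiTagMask" produces zero exactly for smis, and swapping BEQ/BNE (or their r6 compact forms BEQC/BNEC, BEQZC/BNEZC) toggles the site between jump-if-smi and jump-if-not-smi. A minimal sketch of the predicate being toggled:

```cpp
#include <cstdint>

// kSmiTag == 0, kSmiTagMask == 1 on 32-bit V8 targets.
constexpr uint32_t kSmiTagMask = 1;

// True for smis: the masked tag bit is zero, matching the 'eq zero_reg'
// branch that the enabled check takes.
inline bool IsSmi(uint32_t tagged_value) {
  return (tagged_value & kSmiTagMask) == 0;
}
```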