Update V8 to r5388 as required by WebKit r66666

Change-Id: Ib3c42e9b7226d22c65c7077c543fe31afe62a318
diff --git a/src/objects.cc b/src/objects.cc
index 1a4ed05..9b43d24 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -54,7 +54,8 @@
 const int kSetterIndex = 1;
 
 
-static Object* CreateJSValue(JSFunction* constructor, Object* value) {
+MUST_USE_RESULT static Object* CreateJSValue(JSFunction* constructor,
+                                             Object* value) {
   Object* result = Heap::AllocateJSObject(constructor);
   if (result->IsFailure()) return result;
   JSValue::cast(result)->set_value(value);
@@ -2098,6 +2099,124 @@
 }
 
 
+bool NormalizedMapCache::IsCacheable(JSObject* object) {
+  // Caching for global objects is not worth it (there are too few of them).
+  return !object->IsGlobalObject();
+}
+
+
+Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
+  Object* result;
+
+  Map* fast = obj->map();
+  if (!IsCacheable(obj)) {
+    result = fast->CopyNormalized(mode);
+    if (result->IsFailure()) return result;
+  } else {
+    int index = Hash(fast) % kEntries;
+    result = get(index);
+
+    if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
+#ifdef DEBUG
+      if (FLAG_enable_slow_asserts) {
+      // Make sure that the new slow map has exactly the same hash as the
+      // original fast map. This way we can use the hash to check whether a
+      // slow map is already in the cache (see the Contains method).
+        ASSERT(Hash(fast) == Hash(Map::cast(result)));
+        // The cached map should match newly created normalized map bit-by-bit.
+        Object* fresh = fast->CopyNormalized(mode);
+        if (!fresh->IsFailure()) {
+          // Copy the unused byte so that the assertion below works.
+          Map::cast(fresh)->address()[Map::kUnusedOffset] =
+              Map::cast(result)->address()[Map::kUnusedOffset];
+          ASSERT(memcmp(Map::cast(fresh)->address(),
+                        Map::cast(result)->address(),
+                        Map::kSize) == 0);
+        }
+      }
+#endif
+      return result;
+    }
+
+    result = fast->CopyNormalized(mode);
+    if (result->IsFailure()) return result;
+    set(index, result);
+  }
+  Counters::normalized_maps.Increment();
+
+  return result;
+}
+
+
+bool NormalizedMapCache::Contains(Map* map) {
+  // If the map is present in the cache it can only be in one place: at the
+  // index calculated from its hash. We assume that a slow map has the same
+  // hash as the fast map it was generated from.
+  int index = Hash(map) % kEntries;
+  return get(index) == map;
+}
+
+
+void NormalizedMapCache::Clear() {
+  int entries = length();
+  for (int i = 0; i != entries; i++) {
+    set_undefined(i);
+  }
+}
+
+
+int NormalizedMapCache::Hash(Map* fast) {
+  // For performance reasons we only hash the 3 most variable fields of a map:
+  // constructor, prototype and bit_field2.
+
+  // Shift away the tag.
+  int hash = (static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(fast->constructor())) >> 2);
+
+  // XOR-ing the prototype and constructor directly yields too many zero bits
+  // when the two pointers are close (which is fairly common).
+  // To avoid this we shift the prototype 4 bits relative to the constructor.
+  hash ^= (static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(fast->prototype())) << 2);
+
+  return hash ^ (hash >> 16) ^ fast->bit_field2();
+}
+
+
+bool NormalizedMapCache::CheckHit(Map* slow,
+                                  Map* fast,
+                                  PropertyNormalizationMode mode) {
+#ifdef DEBUG
+  slow->NormalizedMapVerify();
+#endif
+  return
+    slow->constructor() == fast->constructor() &&
+    slow->prototype() == fast->prototype() &&
+    slow->inobject_properties() == ((mode == CLEAR_INOBJECT_PROPERTIES) ?
+                                    0 :
+                                    fast->inobject_properties()) &&
+    slow->instance_type() == fast->instance_type() &&
+    slow->bit_field() == fast->bit_field() &&
+    slow->bit_field2() == fast->bit_field2();
+}
+
+
+Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
+  if (!HasFastProperties() &&
+      NormalizedMapCache::IsCacheable(this) &&
+      Top::context()->global_context()->normalized_map_cache()->
+          Contains(map())) {
+    // Replace the map with an identical copy that can be safely modified.
+    Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+    if (obj->IsFailure()) return obj;
+    Counters::normalized_maps.Increment();
+
+    set_map(Map::cast(obj));
+  }
+  return map()->UpdateCodeCache(name, code);
+}
+
+
 Object* JSObject::NormalizeProperties(PropertyNormalizationMode mode,
                                       int expected_additional_properties) {
   if (!HasFastProperties()) return this;
@@ -2162,28 +2281,22 @@
   int index = map()->instance_descriptors()->NextEnumerationIndex();
   dictionary->SetNextEnumerationIndex(index);
 
-  // Allocate new map.
-  obj = map()->CopyDropDescriptors();
+  obj = Top::context()->global_context()->
+      normalized_map_cache()->Get(this, mode);
   if (obj->IsFailure()) return obj;
   Map* new_map = Map::cast(obj);
 
-  // Clear inobject properties if needed by adjusting the instance size and
-  // putting in a filler object instead of the inobject properties.
-  if (mode == CLEAR_INOBJECT_PROPERTIES && map()->inobject_properties() > 0) {
-    int instance_size_delta = map()->inobject_properties() * kPointerSize;
-    int new_instance_size = map()->instance_size() - instance_size_delta;
-    new_map->set_inobject_properties(0);
-    new_map->set_instance_size(new_instance_size);
-    new_map->set_visitor_id(StaticVisitorBase::GetVisitorId(new_map));
-    Heap::CreateFillerObjectAt(this->address() + new_instance_size,
-                               instance_size_delta);
-  }
-  new_map->set_unused_property_fields(0);
-
   // We have now successfully allocated all the necessary objects.
   // Changes can now be made with the guarantee that all of them take effect.
+
+  // Resize the object in the heap if necessary.
+  int new_instance_size = new_map->instance_size();
+  int instance_size_delta = map()->instance_size() - new_instance_size;
+  ASSERT(instance_size_delta >= 0);
+  Heap::CreateFillerObjectAt(this->address() + new_instance_size,
+                             instance_size_delta);
+
   set_map(new_map);
-  map()->set_instance_descriptors(Heap::empty_descriptor_array());
 
   set_properties(dictionary);
 
@@ -2571,7 +2684,8 @@
 Object* JSObject::PreventExtensions() {
   // If there are fast elements we normalize.
   if (HasFastElements()) {
-    NormalizeElements();
+    Object* ok = NormalizeElements();
+    if (ok->IsFailure()) return ok;
   }
   // Make sure that we never go back to fast case.
   element_dictionary()->set_requires_slow_elements();
@@ -3083,6 +3197,33 @@
 }
 
 
+Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+  int new_instance_size = instance_size();
+  if (mode == CLEAR_INOBJECT_PROPERTIES) {
+    new_instance_size -= inobject_properties() * kPointerSize;
+  }
+
+  Object* result = Heap::AllocateMap(instance_type(), new_instance_size);
+  if (result->IsFailure()) return result;
+
+  if (mode != CLEAR_INOBJECT_PROPERTIES) {
+    Map::cast(result)->set_inobject_properties(inobject_properties());
+  }
+
+  Map::cast(result)->set_prototype(prototype());
+  Map::cast(result)->set_constructor(constructor());
+
+  Map::cast(result)->set_bit_field(bit_field());
+  Map::cast(result)->set_bit_field2(bit_field2());
+
+#ifdef DEBUG
+  Map::cast(result)->NormalizedMapVerify();
+#endif
+
+  return result;
+}
+
+
 Object* Map::CopyDropTransitions() {
   Object* new_map = CopyDropDescriptors();
   if (new_map->IsFailure()) return new_map;
@@ -4850,24 +4991,18 @@
 }
 
 
-static inline uint32_t HashField(uint32_t hash,
-                                 bool is_array_index,
-                                 int length = -1) {
-  uint32_t result = (hash << String::kHashShift);
-  if (is_array_index) {
-    // For array indexes mix the length into the hash as an array index could
-    // be zero.
-    ASSERT(length > 0);
-    ASSERT(length <= String::kMaxArrayIndexSize);
-    ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
-           (1 << String::kArrayIndexValueBits));
-    ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
-    result &= ~String::kIsNotArrayIndexMask;
-    result |= length << String::kArrayIndexHashLengthShift;
-  } else {
-    result |= String::kIsNotArrayIndexMask;
-  }
-  return result;
+uint32_t StringHasher::MakeCachedArrayIndex(uint32_t value, int length) {
+  value <<= String::kHashShift;
+  // For array indexes mix the length into the hash as an array index could
+  // be zero.
+  ASSERT(length > 0);
+  ASSERT(length <= String::kMaxArrayIndexSize);
+  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  ASSERT(String::kMaxArrayIndexSize < (1 << String::kArrayIndexValueBits));
+  value &= ~String::kIsNotArrayIndexMask;
+  value |= length << String::kArrayIndexHashLengthShift;
+  return value;
 }
 
 
@@ -4875,14 +5010,11 @@
   ASSERT(is_valid());
   if (length_ <= String::kMaxHashCalcLength) {
     if (is_array_index()) {
-      return v8::internal::HashField(array_index(), true, length_);
-    } else {
-      return v8::internal::HashField(GetHash(), false);
+      return MakeCachedArrayIndex(array_index(), length_);
     }
-    uint32_t payload = v8::internal::HashField(GetHash(), false);
-    return payload;
+    return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask;
   } else {
-    return v8::internal::HashField(length_, false);
+    return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask;
   }
 }