update V8 to r5532 as required by WebKit r68651
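
Highlights of this roll as visible in src/objects.cc:
- Normalized maps get an is_shared bit; NormalizedMapCache::IsCacheable
  and NormalizedMapCache::Contains are replaced by map()->is_shared().
- SharedFunctionInfo gains inobject slack tracking (StartInobjectSlackTracking,
  Detach/AttachInitialMap, CompleteInobjectSlackTracking), driven by a
  countdown construct stub and the stackless Map::TraverseTransitionTree.
- JSObject::HasLocalElement returns a LocalElementType enum instead of a bool.
- DescriptorArray::Sort is split into SortUnchecked plus the sortedness check.
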
Change-Id: I5f75eeffbf64b30dd5080348528d277f293490ad
diff --git a/src/objects.cc b/src/objects.cc
index ef51851..737bf57 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1476,8 +1476,8 @@
FixedArray* new_properties = 0; // Will always be NULL or a valid pointer.
int new_unused_property_fields = map()->unused_property_fields() - 1;
if (map()->unused_property_fields() == 0) {
- new_unused_property_fields = kFieldsAdded - 1;
- Object* new_properties_unchecked =
+ new_unused_property_fields = kFieldsAdded - 1;
+ Object* new_properties_unchecked =
properties()->CopySize(properties()->length() + kFieldsAdded);
if (new_properties_unchecked->IsFailure()) return new_properties_unchecked;
new_properties = FixedArray::cast(new_properties_unchecked);
@@ -2099,61 +2099,34 @@
}
-bool NormalizedMapCache::IsCacheable(JSObject* object) {
- // Caching for global objects is not worth it (there are too few of them).
- return !object->IsGlobalObject();
-}
-
-
Object* NormalizedMapCache::Get(JSObject* obj, PropertyNormalizationMode mode) {
- Object* result;
-
Map* fast = obj->map();
- if (!IsCacheable(obj)) {
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- } else {
- int index = Hash(fast) % kEntries;
- result = get(index);
-
- if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
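+ // A normalized map can only be cached at the single slot derived from
+ // the hash of the fast map it was created from.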
+ int index = Hash(fast) % kEntries;
+ Object* result = get(index);
+ if (result->IsMap() && CheckHit(Map::cast(result), fast, mode)) {
#ifdef DEBUG
- if (FLAG_enable_slow_asserts) {
- // Make sure that the new slow map has exactly the same hash as the
- // original fast map. This way we can use hash to check if a slow map
- // is already in the hash (see Contains method).
- ASSERT(Hash(fast) == Hash(Map::cast(result)));
- // The cached map should match newly created normalized map bit-by-bit.
- Object* fresh = fast->CopyNormalized(mode);
- if (!fresh->IsFailure()) {
- ASSERT(memcmp(Map::cast(fresh)->address(),
- Map::cast(result)->address(),
- Map::kSize) == 0);
- }
+ if (FLAG_enable_slow_asserts) {
+ // The cached map should match newly created normalized map bit-by-bit.
+ Object* fresh = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (!fresh->IsFailure()) {
+ ASSERT(memcmp(Map::cast(fresh)->address(),
+ Map::cast(result)->address(),
+ Map::kSize) == 0);
}
-#endif
- return result;
}
-
- result = fast->CopyNormalized(mode);
- if (result->IsFailure()) return result;
- set(index, result);
+#endif
+ return result;
}
+
+ result = fast->CopyNormalized(mode, SHARED_NORMALIZED_MAP);
+ if (result->IsFailure()) return result;
+ set(index, result);
Counters::normalized_maps.Increment();
return result;
}
-bool NormalizedMapCache::Contains(Map* map) {
- // If the map is present in the cache it can only be at one place:
- // at the index calculated from the hash. We assume that a slow map has the
- // same hash as a fast map it has been generated from.
- int index = Hash(map) % kEntries;
- return get(index) == map;
-}
-
-
void NormalizedMapCache::Clear() {
int entries = length();
for (int i = 0; i != entries; i++) {
@@ -2184,7 +2157,7 @@
Map* fast,
PropertyNormalizationMode mode) {
#ifdef DEBUG
- slow->NormalizedMapVerify();
+ slow->SharedMapVerify();
#endif
return
slow->constructor() == fast->constructor() &&
@@ -2194,17 +2167,17 @@
fast->inobject_properties()) &&
slow->instance_type() == fast->instance_type() &&
slow->bit_field() == fast->bit_field() &&
- slow->bit_field2() == fast->bit_field2();
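+ // The cached map is marked shared while the fast map is not, so
+ // ignore the kIsShared bit when comparing bit_field2.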
+ (slow->bit_field2() & ~(1 << Map::kIsShared)) == fast->bit_field2();
}
Object* JSObject::UpdateMapCodeCache(String* name, Code* code) {
- if (!HasFastProperties() &&
- NormalizedMapCache::IsCacheable(this) &&
- Top::context()->global_context()->normalized_map_cache()->
- Contains(map())) {
- // Replace the map with the identical copy that can be safely modified.
- Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES);
+ if (map()->is_shared()) {
+ // Fast case maps are never marked as shared.
+ ASSERT(!HasFastProperties());
+ // Replace the map with an identical copy that can be safely modified.
+ Object* obj = map()->CopyNormalized(KEEP_INOBJECT_PROPERTIES,
+ UNIQUE_NORMALIZED_MAP);
if (obj->IsFailure()) return obj;
Counters::normalized_maps.Increment();
@@ -3189,12 +3162,14 @@
}
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(false);
Map::cast(result)->ClearCodeCache();
return result;
}
-Object* Map::CopyNormalized(PropertyNormalizationMode mode) {
+Object* Map::CopyNormalized(PropertyNormalizationMode mode,
+ NormalizedMapSharingMode sharing) {
int new_instance_size = instance_size();
if (mode == CLEAR_INOBJECT_PROPERTIES) {
new_instance_size -= inobject_properties() * kPointerSize;
@@ -3213,8 +3188,12 @@
Map::cast(result)->set_bit_field(bit_field());
Map::cast(result)->set_bit_field2(bit_field2());
+ Map::cast(result)->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
+
#ifdef DEBUG
- Map::cast(result)->NormalizedMapVerify();
+ if (Map::cast(result)->is_shared()) {
+ Map::cast(result)->SharedMapVerify();
+ }
#endif
return result;
@@ -3271,6 +3250,47 @@
}
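+// Stackless post-order traversal of the transition tree rooted at this map.
+// Instead of an explicit stack, parent links are kept by temporarily
+// pointing a visited map's map word at its parent (it is restored to
+// meta_map before the callback runs), and the position within a map's
+// transitions is kept as a Smi in the map word of the descriptor contents
+// array (restored to fixed_array_map when backtracking).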
+void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
+ Map* current = this;
+ while (current != Heap::meta_map()) {
+ DescriptorArray* d = reinterpret_cast<DescriptorArray*>(
+ *RawField(current, Map::kInstanceDescriptorsOffset));
+ if (d == Heap::empty_descriptor_array()) {
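+ // No descriptors, hence no transitions: visit this map and backtrack.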
+ Map* prev = current->map();
+ current->set_map(Heap::meta_map());
+ callback(current, data);
+ current = prev;
+ continue;
+ }
+
+ FixedArray* contents = reinterpret_cast<FixedArray*>(
+ d->get(DescriptorArray::kContentArrayIndex));
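+ // The contents array's map word doubles as the traversal cursor: a Smi
+ // resume index while inside this subtree, fixed_array_map otherwise.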
+ Object** map_or_index_field = RawField(contents, HeapObject::kMapOffset);
+ Object* map_or_index = *map_or_index_field;
+ bool map_done = true;
+ for (int i = map_or_index->IsSmi() ? Smi::cast(map_or_index)->value() : 0;
+ i < contents->length();
+ i += 2) {
+ PropertyDetails details(Smi::cast(contents->get(i + 1)));
+ if (details.IsTransition()) {
+ Map* next = reinterpret_cast<Map*>(contents->get(i));
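+ // Descend: park the parent link in the child's map word and record
+ // where to resume in this descriptor array.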
+ next->set_map(current);
+ *map_or_index_field = Smi::FromInt(i + 2);
+ current = next;
+ map_done = false;
+ break;
+ }
+ }
+ if (!map_done) continue;
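+ // All transitions visited: restore the contents map word, visit this
+ // map, and backtrack to the parent.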
+ *map_or_index_field = Heap::fixed_array_map();
+ Map* prev = current->map();
+ current->set_map(Heap::meta_map());
+ callback(current, data);
+ current = prev;
+ }
+}
+
+
Object* CodeCache::Update(String* name, Code* code) {
ASSERT(code->ic_state() == MONOMORPHIC);
@@ -3825,7 +3845,7 @@
}
-void DescriptorArray::Sort() {
+void DescriptorArray::SortUnchecked() {
// In-place heap sort.
int len = number_of_descriptors();
@@ -3875,7 +3895,11 @@
parent_index = child_index;
}
}
+}
+
+void DescriptorArray::Sort() {
+ SortUnchecked();
SLOW_ASSERT(IsSortedNoDuplicates());
}
@@ -5269,6 +5293,13 @@
}
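+// Clears the only-simple-this-property-assignments hint so that no inline
+// constructor is generated for this function anymore.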
+void SharedFunctionInfo::ForbidInlineConstructor() {
+ set_compiler_hints(BooleanBit::set(compiler_hints(),
+ kHasOnlySimpleThisPropertyAssignments,
+ false));
+}
+
+
void SharedFunctionInfo::SetThisPropertyAssignmentsInfo(
bool only_simple_this_property_assignments,
FixedArray* assignments) {
@@ -5366,6 +5397,107 @@
}
+void SharedFunctionInfo::StartInobjectSlackTracking(Map* map) {
+ ASSERT(!IsInobjectSlackTrackingInProgress());
+
+ // Only initiate tracking the first time.
+ if (live_objects_may_exist()) return;
+ set_live_objects_may_exist(true);
+
+ // No tracking during the snapshot construction phase.
+ if (Serializer::enabled()) return;
+
+ if (map->unused_property_fields() == 0) return;
+
+ // A nonzero counter is a leftover from a previous attempt that was
+ // interrupted by GC; keep it.
+ if (construction_count() == 0) {
+ set_construction_count(kGenerousAllocationCount);
+ }
+ set_initial_map(map);
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ construct_stub());
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::DetachInitialMap() {
+ Map* map = reinterpret_cast<Map*>(initial_map());
+
+ // Make the map remember to restore the link if it survives the GC.
+ map->set_bit_field2(
+ map->bit_field2() | (1 << Map::kAttachedToSharedFunctionInfo));
+
+ // Undo state changes made by StartInobjectSlackTracking (except the
+ // construction_count). This way, if the initial map does not survive the
+ // GC, StartInobjectSlackTracking will be called again the next time the
+ // constructor is called. The countdown will continue and (possibly after
+ // several more GCs) CompleteInobjectSlackTracking will eventually be called.
+ set_initial_map(Heap::raw_unchecked_undefined_value());
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ *RawField(this, kConstructStubOffset));
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+ // It is safe to clear the flag: it will be set again if the map is live.
+ set_live_objects_may_exist(false);
+}
+
+
+// Called from GC, hence reinterpret_cast and unchecked accessors.
+void SharedFunctionInfo::AttachInitialMap(Map* map) {
+ map->set_bit_field2(
+ map->bit_field2() & ~(1 << Map::kAttachedToSharedFunctionInfo));
+
+ // Resume inobject slack tracking.
+ set_initial_map(map);
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubGeneric),
+ *RawField(this, kConstructStubOffset));
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubCountdown));
+ // The map survived the GC, so there may be objects referencing it.
+ set_live_objects_may_exist(true);
+}
+
+
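+// TraverseTransitionTree callback: lowers *data (an int) to the smallest
+// unused_property_fields value seen in the tree.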
+static void GetMinInobjectSlack(Map* map, void* data) {
+ int slack = map->unused_property_fields();
+ if (*reinterpret_cast<int*>(data) > slack) {
+ *reinterpret_cast<int*>(data) = slack;
+ }
+}
+
+
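+// TraverseTransitionTree callback: shrinks the map by *data (the minimum
+// slack), trimming inobject properties, unused fields, and instance size.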
+static void ShrinkInstanceSize(Map* map, void* data) {
+ int slack = *reinterpret_cast<int*>(data);
+ map->set_inobject_properties(map->inobject_properties() - slack);
+ map->set_unused_property_fields(map->unused_property_fields() - slack);
+ map->set_instance_size(map->instance_size() - slack * kPointerSize);
+
+ // The visitor id might depend on the instance size, so recalculate it.
+ map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
+}
+
+
+void SharedFunctionInfo::CompleteInobjectSlackTracking() {
+ ASSERT(live_objects_may_exist() && IsInobjectSlackTrackingInProgress());
+ Map* map = Map::cast(initial_map());
+
+ set_initial_map(Heap::undefined_value());
+ ASSERT_EQ(Builtins::builtin(Builtins::JSConstructStubCountdown),
+ construct_stub());
+ set_construct_stub(Builtins::builtin(Builtins::JSConstructStubGeneric));
+
+ int slack = map->unused_property_fields();
+ map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
+ if (slack != 0) {
+ // Resize the initial map and all maps in its transition tree.
+ map->TraverseTransitionTree(&ShrinkInstanceSize, &slack);
+ // Give the correct expected_nof_properties to initial maps created later.
+ ASSERT(expected_nof_properties() >= slack);
+ set_expected_nof_properties(expected_nof_properties() - slack);
+ }
+}
+
+
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -5919,21 +6051,24 @@
}
-bool JSObject::HasLocalElement(uint32_t index) {
+JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
!Top::MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
Top::ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return false;
+ return UNDEFINED_ELEMENT;
}
// Check for lookup interceptor
if (HasIndexedInterceptor()) {
- return HasElementWithInterceptor(this, index);
+ return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
+ : UNDEFINED_ELEMENT;
}
// Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
+ if (this->IsStringObjectWithCharacterAt(index)) {
+ return STRING_CHARACTER_ELEMENT;
+ }
switch (GetElementsKind()) {
case FAST_ELEMENTS: {
@@ -5941,12 +6076,16 @@
static_cast<uint32_t>
(Smi::cast(JSArray::cast(this)->length())->value()) :
static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
+ if ((index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+ return FAST_ELEMENT;
+ }
+ break;
}
case PIXEL_ELEMENTS: {
PixelArray* pixels = PixelArray::cast(elements());
- return (index < static_cast<uint32_t>(pixels->length()));
+ if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
+ break;
}
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
@@ -5956,18 +6095,22 @@
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS: {
ExternalArray* array = ExternalArray::cast(elements());
- return (index < static_cast<uint32_t>(array->length()));
+ if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
+ break;
}
case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != NumberDictionary::kNotFound;
+ if (element_dictionary()->FindEntry(index) !=
+ NumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ break;
}
default:
UNREACHABLE();
break;
}
- UNREACHABLE();
- return Heap::null_value();
+
+ return UNDEFINED_ELEMENT;
}
@@ -8710,11 +8853,11 @@
// No free slot - extend break point info array.
Handle<FixedArray> old_break_points =
Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
- debug_info->set_break_points(*Factory::NewFixedArray(
- old_break_points->length() +
- Debug::kEstimatedNofBreakPointsInFunction));
Handle<FixedArray> new_break_points =
- Handle<FixedArray>(FixedArray::cast(debug_info->break_points()));
+ Factory::NewFixedArray(old_break_points->length() +
+ Debug::kEstimatedNofBreakPointsInFunction);
+
+ debug_info->set_break_points(*new_break_points);
for (int i = 0; i < old_break_points->length(); i++) {
new_break_points->set(i, old_break_points->get(i));
}