Version 3.7.5
Added initial gyp infrastructure for MIPS.
Implemented performance improvements to the incremental garbage collector.
Added optimizations and stability improvements on all platforms.
git-svn-id: http://v8.googlecode.com/svn/trunk@9950 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/heap.cc b/src/heap.cc
index ef1eb77..0cbe13f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -143,6 +143,7 @@
number_idle_notifications_(0),
last_idle_notification_gc_count_(0),
last_idle_notification_gc_count_init_(false),
+ promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
@@ -447,6 +448,7 @@
// hope that eventually there will be no weak callback invocations.
// Therefore stop recollecting after several attempts.
mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
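+ // Clear the compilation cache so code that is only reachable through it
+ // can be collected by the passes below.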
+ isolate_->compilation_cache()->Clear();
const int kMaxNumberOfAttempts = 7;
for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
@@ -454,6 +456,8 @@
}
}
mark_compact_collector()->SetFlags(kNoGCFlags);
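+ // Give back as much memory as possible: shrink the new space to its
+ // minimum capacity and uncommit the incremental marking deque's backing
+ // store.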
+ new_space_.Shrink();
+ incremental_marking()->UncommitMarkingDeque();
}
@@ -985,6 +989,42 @@
}
+void PromotionQueue::Initialize() {
+ // Assumes that a NewSpacePage exactly fits a number of promotion queue
+ // entries (where each is a pair of intptr_t). This allows us to simplify
+ // the test for when to switch pages.
+ ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
+ == 0);
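+ // The queue lives at the high end of to-space and grows downward:
+ // front_ and rear_ start at ToSpaceEnd() while limit_ marks ToSpaceStart().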
+ limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+ front_ = rear_ =
+ reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+ emergency_stack_ = NULL;
+ guard_ = false;
+}
+
+
+void PromotionQueue::RelocateQueueHead() {
+ ASSERT(emergency_stack_ == NULL);
+
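+ // The page holding the queue head is about to be allocated into by the
+ // scavenger; copy the head's entries to a heap-allocated emergency stack
+ // so they are not overwritten.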
+ Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+ intptr_t* head_start = rear_;
+ intptr_t* head_end =
+ Min(front_, reinterpret_cast<intptr_t*>(p->body_limit()));
+
+ int entries_count =
+ static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+ emergency_stack_ = new List<Entry>(2 * entries_count);
+
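+ // Each queue entry occupies two words: the object size followed by the
+ // object itself.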
+ while (head_start != head_end) {
+ int size = static_cast<int>(*(head_start++));
+ HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+ emergency_stack_->Add(Entry(obj, size));
+ }
+ rear_ = head_end;
+}
+
+
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
@@ -1033,7 +1073,7 @@
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceStart();
- promotion_queue_.Initialize(new_space_.ToSpaceEnd());
+ promotion_queue_.Initialize();
#ifdef DEBUG
store_buffer()->Clean();
@@ -1073,10 +1113,11 @@
&scavenge_visitor);
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
UpdateNewSpaceReferencesInExternalStringTable(
&UpdateNewSpaceReferenceInExternalStringTableEntry);
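+ // The scavenge is complete; release the emergency stack if the queue head
+ // had to be relocated.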
+ promotion_queue_.Destroy();
+
LiveObjectList::UpdateReferencesForScavengeGC();
isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
incremental_marking()->UpdateMarkingDequeAfterScavenge();
@@ -1483,6 +1524,7 @@
}
}
MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
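+ // Record the new allocation top with the promotion queue so it can
+ // relocate its head before the allocation overwrites it.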
+ heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
Object* result = allocation->ToObjectUnchecked();
*slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);