Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index c9a310a..65bfdd9 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2,36 +2,47 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/mark-compact.h"
#include "src/base/atomicops.h"
#include "src/base/bits.h"
+#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
-#include "src/cpu-profiler.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
+#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
+#include "src/heap/array-buffer-tracker.h"
+#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
-#include "src/heap/mark-compact.h"
+#include "src/heap/mark-compact-inl.h"
+#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
-#include "src/heap-profiler.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/v8.h"
namespace v8 {
namespace internal {
const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kBlackBitPattern = "11";
+const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";
+// The following has to hold in order for {Marking::MarkBitFrom} to not produce
+// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
+STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
+
+
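For orientation, here is a minimal sketch of how a pair of adjacent bitmap bits maps onto these patterns, assuming the first character of each pattern string is the bit returned by Marking::MarkBitFrom and the second is the bit that follows it (a free-standing illustration, not the MarkBit API itself):

    enum class Color { kWhite, kGrey, kBlack, kImpossible };

    // bit      = the bit addressed by Marking::MarkBitFrom(object)
    // next_bit = the immediately following bit in the bitmap
    Color ColorFromBits(bool bit, bool next_bit) {
      if (!bit) return next_bit ? Color::kImpossible : Color::kWhite;  // "01" / "00"
      return next_bit ? Color::kBlack : Color::kGrey;                  // "11" / "10"
    }

The STATIC_ASSERT above guarantees that two distinct objects are never so close together that their mark-bit pairs overlap and accidentally form the "01" pattern.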
// -------------------------------------------------------------------------
// MarkCompactCollector
@@ -40,20 +51,21 @@
#ifdef DEBUG
state_(IDLE),
#endif
- reduce_memory_footprint_(false),
- abort_incremental_marking_(false),
marking_parity_(ODD_MARKING_PARITY),
- compacting_(false),
was_marked_incrementally_(false),
- sweeping_in_progress_(false),
- pending_sweeper_jobs_semaphore_(0),
evacuation_(false),
- migration_slots_buffer_(NULL),
+ slots_buffer_allocator_(nullptr),
+ migration_slots_buffer_(nullptr),
heap_(heap),
marking_deque_memory_(NULL),
- marking_deque_memory_committed_(false),
- code_flusher_(NULL),
- have_code_to_deoptimize_(false) {
+ marking_deque_memory_committed_(0),
+ code_flusher_(nullptr),
+ have_code_to_deoptimize_(false),
+ compacting_(false),
+ sweeping_in_progress_(false),
+ compaction_in_progress_(false),
+ pending_sweeper_tasks_semaphore_(0),
+ pending_compaction_tasks_semaphore_(0) {
}
#ifdef VERIFY_HEAP
@@ -61,7 +73,7 @@
public:
explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -70,7 +82,7 @@
}
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
Object* p = rinfo->target_object();
@@ -78,7 +90,7 @@
}
}
- void VisitCell(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
Code* code = rinfo->host();
DCHECK(rinfo->rmode() == RelocInfo::CELL);
if (!code->IsWeakObject(rinfo->target_cell())) {
@@ -99,9 +111,12 @@
for (Address current = bottom; current < top; current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
+ // The next word for sure belongs to the current object, so jump over it.
+ current += kPointerSize;
}
}
}
@@ -134,11 +149,8 @@
static void VerifyMarking(Heap* heap) {
- VerifyMarking(heap->old_pointer_space());
- VerifyMarking(heap->old_data_space());
+ VerifyMarking(heap->old_space());
VerifyMarking(heap->code_space());
- VerifyMarking(heap->cell_space());
- VerifyMarking(heap->property_cell_space());
VerifyMarking(heap->map_space());
VerifyMarking(heap->new_space());
@@ -157,7 +169,7 @@
class VerifyEvacuationVisitor : public ObjectVisitor {
public:
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** current = start; current < end; current++) {
if ((*current)->IsHeapObject()) {
HeapObject* object = HeapObject::cast(*current);
@@ -170,7 +182,7 @@
static void VerifyEvacuation(Page* page) {
VerifyEvacuationVisitor visitor;
- HeapObjectIterator iterator(page, NULL);
+ HeapObjectIterator iterator(page);
for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
heap_object = iterator.Next()) {
// We skip free space objects.
@@ -200,8 +212,7 @@
static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
- if (FLAG_use_allocation_folding &&
- (space == heap->old_pointer_space() || space == heap->old_data_space())) {
+ if (FLAG_use_allocation_folding && (space == heap->old_space())) {
return;
}
PageIterator it(space);
@@ -215,11 +226,8 @@
static void VerifyEvacuation(Heap* heap) {
- VerifyEvacuation(heap, heap->old_pointer_space());
- VerifyEvacuation(heap, heap->old_data_space());
+ VerifyEvacuation(heap, heap->old_space());
VerifyEvacuation(heap, heap->code_space());
- VerifyEvacuation(heap, heap->cell_space());
- VerifyEvacuation(heap, heap->property_cell_space());
VerifyEvacuation(heap, heap->map_space());
VerifyEvacuation(heap->new_space());
@@ -230,18 +238,37 @@
void MarkCompactCollector::SetUp() {
- free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
- free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+ free_list_old_space_.Reset(new FreeList(heap_->old_space()));
+ free_list_code_space_.Reset(new FreeList(heap_->code_space()));
+ free_list_map_space_.Reset(new FreeList(heap_->map_space()));
+ EnsureMarkingDequeIsReserved();
+ EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
+ slots_buffer_allocator_ = new SlotsBufferAllocator();
+
+ if (FLAG_flush_code) {
+ code_flusher_ = new CodeFlusher(isolate());
+ if (FLAG_trace_code_flushing) {
+ PrintF("[code-flushing is now on]\n");
+ }
+ }
}
void MarkCompactCollector::TearDown() {
AbortCompaction();
delete marking_deque_memory_;
+ delete slots_buffer_allocator_;
+ delete code_flusher_;
}
void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+ DCHECK(!p->NeverEvacuate());
p->MarkEvacuationCandidate();
evacuation_candidates_.Add(p);
}
@@ -261,16 +288,9 @@
if (!compacting_) {
DCHECK(evacuation_candidates_.length() == 0);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- // If GDBJIT interface is active disable compaction.
- if (FLAG_gdbjit) return false;
-#endif
+ CollectEvacuationCandidates(heap()->old_space());
- CollectEvacuationCandidates(heap()->old_pointer_space());
- CollectEvacuationCandidates(heap()->old_data_space());
-
- if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
- FLAG_incremental_code_compaction)) {
+ if (FLAG_compact_code_space) {
CollectEvacuationCandidates(heap()->code_space());
} else if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->code_space());
@@ -278,13 +298,10 @@
if (FLAG_trace_fragmentation) {
TraceFragmentation(heap()->map_space());
- TraceFragmentation(heap()->cell_space());
- TraceFragmentation(heap()->property_cell_space());
}
- heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
- heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+ heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
+ heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
compacting_ = evacuation_candidates_.length() > 0;
}
@@ -293,23 +310,66 @@
}
+void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
+ heap_->store_buffer()->ClearInvalidStoreBufferEntries();
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
+ int number_of_pages = evacuation_candidates_.length();
+ for (int i = 0; i < number_of_pages; i++) {
+ Page* p = evacuation_candidates_[i];
+ SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
+ }
+ }
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap) {
+ VerifyValidStoreAndSlotsBufferEntries();
+ }
+#endif
+}
+
+
+#ifdef VERIFY_HEAP
+static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
+ PageIterator it(space);
+ while (it.has_next()) {
+ Page* p = it.next();
+ SlotsBuffer::VerifySlots(heap, p->slots_buffer());
+ }
+}
+
+
+void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
+ heap()->store_buffer()->VerifyValidStoreBufferEntries();
+
+ VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
+ VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
+ VerifyValidSlotsBufferEntries(heap(), heap()->map_space());
+
+ LargeObjectIterator it(heap()->lo_space());
+ for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+ SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
+ }
+}
+#endif
+
+
void MarkCompactCollector::CollectGarbage() {
// Make sure that Prepare() has been called. The individual steps below will
// update the state as they proceed.
DCHECK(state_ == PREPARE_GC);
MarkLiveObjects();
+
DCHECK(heap_->incremental_marking()->IsStopped());
- if (FLAG_collect_maps) ClearNonLiveReferences();
-
- ProcessAndClearWeakCells();
-
- ClearWeakCollections();
-
- heap_->set_encountered_weak_cells(Smi::FromInt(0));
-
- isolate()->global_handles()->CollectPhantomCallbackData();
+ ClearNonLiveReferences();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
@@ -319,23 +379,9 @@
SweepSpaces();
-#ifdef VERIFY_HEAP
- if (heap()->weak_embedded_objects_verification_enabled()) {
- VerifyWeakEmbeddedObjectsInCode();
- }
- if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
- VerifyOmittedMapChecks();
- }
-#endif
+ EvacuateNewSpaceAndCandidates();
Finish();
-
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- DCHECK(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
}
@@ -363,11 +409,8 @@
void MarkCompactCollector::VerifyMarkbitsAreClean() {
- VerifyMarkbitsAreClean(heap_->old_pointer_space());
- VerifyMarkbitsAreClean(heap_->old_data_space());
+ VerifyMarkbitsAreClean(heap_->old_space());
VerifyMarkbitsAreClean(heap_->code_space());
- VerifyMarkbitsAreClean(heap_->cell_space());
- VerifyMarkbitsAreClean(heap_->property_cell_space());
VerifyMarkbitsAreClean(heap_->map_space());
VerifyMarkbitsAreClean(heap_->new_space());
@@ -423,23 +466,42 @@
void MarkCompactCollector::ClearMarkbits() {
ClearMarkbitsInPagedSpace(heap_->code_space());
ClearMarkbitsInPagedSpace(heap_->map_space());
- ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
- ClearMarkbitsInPagedSpace(heap_->old_data_space());
- ClearMarkbitsInPagedSpace(heap_->cell_space());
- ClearMarkbitsInPagedSpace(heap_->property_cell_space());
+ ClearMarkbitsInPagedSpace(heap_->old_space());
ClearMarkbitsInNewSpace(heap_->new_space());
LargeObjectIterator it(heap_->lo_space());
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
- mark_bit.Clear();
- mark_bit.Next().Clear();
+ Marking::MarkWhite(Marking::MarkBitFrom(obj));
Page::FromAddress(obj->address())->ResetProgressBar();
Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
+class MarkCompactCollector::CompactionTask : public CancelableTask {
+ public:
+ explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+ : CancelableTask(heap->isolate()), spaces_(spaces) {}
+
+ virtual ~CompactionTask() {}
+
+ private:
+ // v8::internal::CancelableTask overrides.
+ void RunInternal() override {
+ MarkCompactCollector* mark_compact =
+ isolate()->heap()->mark_compact_collector();
+ SlotsBuffer* evacuation_slots_buffer = nullptr;
+ mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
+ mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
+ mark_compact->pending_compaction_tasks_semaphore_.Signal();
+ }
+
+ CompactionSpaceCollection* spaces_;
+
+ DISALLOW_COPY_AND_ASSIGN(CompactionTask);
+};
+
+
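The new CompactionTask mirrors the SweeperTask below: a cancelable background task that signals a semaphore when it finishes. A hypothetical sketch of how such a task might be dispatched and joined (the actual call site is outside this hunk, and spaces stands for a per-task CompactionSpaceCollection prepared elsewhere):

    // Hypothetical dispatch, patterned after the SweeperTask dispatch below.
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new CompactionTask(heap(), spaces), v8::Platform::kShortRunningTask);
    // ... main-thread work ...
    // Join: one Wait() per task that was started.
    pending_compaction_tasks_semaphore_.Wait();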
class MarkCompactCollector::SweeperTask : public v8::Task {
public:
SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
@@ -448,9 +510,9 @@
private:
// v8::Task overrides.
- void Run() OVERRIDE {
+ void Run() override {
heap_->mark_compact_collector()->SweepInParallel(space_, 0);
- heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+ heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
}
Heap* heap_;
@@ -461,14 +523,40 @@
void MarkCompactCollector::StartSweeperThreads() {
- DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
- DCHECK(free_list_old_data_space_.get()->IsEmpty());
+ DCHECK(free_list_old_space_.get()->IsEmpty());
+ DCHECK(free_list_code_space_.get()->IsEmpty());
+ DCHECK(free_list_map_space_.get()->IsEmpty());
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->old_data_space()),
+ new SweeperTask(heap(), heap()->old_space()),
v8::Platform::kShortRunningTask);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
- new SweeperTask(heap(), heap()->old_pointer_space()),
+ new SweeperTask(heap(), heap()->code_space()),
v8::Platform::kShortRunningTask);
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ new SweeperTask(heap(), heap()->map_space()),
+ v8::Platform::kShortRunningTask);
+}
+
+
+void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
+ PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+ if (!page->SweepingCompleted()) {
+ SweepInParallel(page, owner);
+ if (!page->SweepingCompleted()) {
+ // We were not able to sweep that page, i.e., a concurrent
+ // sweeper thread currently owns this page. Wait for the sweeper
+ // thread to be done with this page.
+ page->WaitUntilSweepingCompleted();
+ }
+ }
+}
+
+
+void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
+ if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
+ SweepInParallel(heap()->paged_space(space->identity()), 0);
+ space->RefillFreeList();
+ }
}
@@ -478,20 +566,22 @@
// If sweeping is not completed or not running at all, we try to complete it
// here.
if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
- SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
- SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
+ SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
+ SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
+ SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
}
- // Wait twice for both jobs.
+
if (FLAG_concurrent_sweeping) {
- pending_sweeper_jobs_semaphore_.Wait();
- pending_sweeper_jobs_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
+ pending_sweeper_tasks_semaphore_.Wait();
}
+
ParallelSweepSpacesComplete();
sweeping_in_progress_ = false;
- RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
- RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
- heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
- heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+ heap()->old_space()->RefillFreeList();
+ heap()->code_space()->RefillFreeList();
+ heap()->map_space()->RefillFreeList();
#ifdef VERIFY_HEAP
if (FLAG_verify_heap && !evacuation()) {
@@ -502,40 +592,21 @@
bool MarkCompactCollector::IsSweepingCompleted() {
- if (!pending_sweeper_jobs_semaphore_.WaitFor(
+ if (!pending_sweeper_tasks_semaphore_.WaitFor(
base::TimeDelta::FromSeconds(0))) {
return false;
}
- pending_sweeper_jobs_semaphore_.Signal();
+ pending_sweeper_tasks_semaphore_.Signal();
return true;
}
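IsSweepingCompleted relies on a small semaphore trick: the non-blocking WaitFor(0) probes whether any sweeper task has signaled yet, and the immediate Signal() puts the token back so the blocking waits in EnsureSweepingCompleted above still balance. The same probe in isolation, as a minimal sketch using the base::Semaphore calls already used in this file:

    // Returns true if at least one background task has already signaled the
    // semaphore, without permanently consuming its signal.
    bool HasSignaled(base::Semaphore* sem) {
      if (!sem->WaitFor(base::TimeDelta::FromSeconds(0))) return false;
      sem->Signal();  // Restore the count for the eventual blocking Wait().
      return true;
    }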
-void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
- FreeList* free_list;
-
- if (space == heap()->old_pointer_space()) {
- free_list = free_list_old_pointer_space_.get();
- } else if (space == heap()->old_data_space()) {
- free_list = free_list_old_data_space_.get();
- } else {
- // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
- // to only refill them for old data and pointer spaces.
- return;
- }
-
- intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
- space->AddToAccountingStats(freed_bytes);
- space->DecrementUnsweptFreeBytes(freed_bytes);
-}
-
-
-void Marking::TransferMark(Address old_start, Address new_start) {
+void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
// This is only used when resizing an object.
DCHECK(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
- if (!heap_->incremental_marking()->IsMarking()) return;
+ if (!heap->incremental_marking()->IsMarking()) return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
@@ -550,17 +621,14 @@
#endif
if (Marking::IsBlack(old_mark_bit)) {
- old_mark_bit.Clear();
- DCHECK(IsWhite(old_mark_bit));
+ Marking::BlackToWhite(old_mark_bit);
Marking::MarkBlack(new_mark_bit);
return;
} else if (Marking::IsGrey(old_mark_bit)) {
- old_mark_bit.Clear();
- old_mark_bit.Next().Clear();
- DCHECK(IsWhite(old_mark_bit));
- heap_->incremental_marking()->WhiteToGreyAndPush(
+ Marking::GreyToWhite(old_mark_bit);
+ heap->incremental_marking()->WhiteToGreyAndPush(
HeapObject::FromAddress(new_start), new_mark_bit);
- heap_->incremental_marking()->RestartIfNotMarking();
+ heap->incremental_marking()->RestartIfNotMarking();
}
#ifdef DEBUG
@@ -574,18 +642,12 @@
switch (space) {
case NEW_SPACE:
return "NEW_SPACE";
- case OLD_POINTER_SPACE:
- return "OLD_POINTER_SPACE";
- case OLD_DATA_SPACE:
- return "OLD_DATA_SPACE";
+ case OLD_SPACE:
+ return "OLD_SPACE";
case CODE_SPACE:
return "CODE_SPACE";
case MAP_SPACE:
return "MAP_SPACE";
- case CELL_SPACE:
- return "CELL_SPACE";
- case PROPERTY_CELL_SPACE:
- return "PROPERTY_CELL_SPACE";
case LO_SPACE:
return "LO_SPACE";
default:
@@ -596,203 +658,173 @@
}
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them. Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
- // If page was not swept then there are no free list items on it.
- if (!p->WasSwept()) {
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()), p->LiveBytes());
- }
- return 0;
- }
+void MarkCompactCollector::ComputeEvacuationHeuristics(
+ int area_size, int* target_fragmentation_percent,
+ int* max_evacuated_bytes) {
+ // For memory reducing mode we directly define both constants.
+ const int kTargetFragmentationPercentForReduceMemory = 20;
+ const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
+ // For regular mode (which is latency critical) we define less aggressive
+ // defaults to start and switch to a trace-based (using compaction speed)
+ // approach as soon as we have enough samples.
+ const int kTargetFragmentationPercent = 70;
+ const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
+ // Time to take for a single area (=payload of page). Used as soon as there
+ // exist enough compaction speed samples.
+ const int kTargetMsPerArea = 1;
- intptr_t ratio;
- intptr_t ratio_threshold;
- intptr_t area_size = space->AreaSize();
- if (space->identity() == CODE_SPACE) {
- ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
- ratio_threshold = 10;
+ if (heap()->ShouldReduceMemory()) {
+ *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
+ *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
} else {
- ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
- ratio_threshold = 15;
+ const intptr_t estimated_compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (estimated_compaction_speed != 0) {
+ // Estimate the target fragmentation based on traced compaction speed
+ // and a goal for a single page.
+ const intptr_t estimated_ms_per_area =
+ 1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
+ *target_fragmentation_percent =
+ 100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
+ if (*target_fragmentation_percent <
+ kTargetFragmentationPercentForReduceMemory) {
+ *target_fragmentation_percent =
+ kTargetFragmentationPercentForReduceMemory;
+ }
+ } else {
+ *target_fragmentation_percent = kTargetFragmentationPercent;
+ }
+ *max_evacuated_bytes = kMaxEvacuatedBytes;
}
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
- reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
- static_cast<int>(sizes.small_size_),
- static_cast<double>(sizes.small_size_ * 100) / area_size,
- static_cast<int>(sizes.medium_size_),
- static_cast<double>(sizes.medium_size_ * 100) / area_size,
- static_cast<int>(sizes.large_size_),
- static_cast<double>(sizes.large_size_ * 100) / area_size,
- static_cast<int>(sizes.huge_size_),
- static_cast<double>(sizes.huge_size_ * 100) / area_size,
- (ratio > ratio_threshold) ? "[fragmented]" : "");
- }
-
- if (FLAG_always_compact && sizes.Total() != area_size) {
- return 1;
- }
-
- if (ratio <= ratio_threshold) return 0; // Not fragmented.
-
- return static_cast<int>(ratio - ratio_threshold);
}
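As a rough worked example of the heuristic above (illustrative numbers, not measurements): with about 1 MB of page payload and a traced compaction speed of about 1 MB per millisecond,

    // estimated_ms_per_area        = 1 + 1 MB / (1 MB/ms)  = 2 ms
    // target_fragmentation_percent = 100 - 100 * 1 / 2     = 50

so a page qualifies only if it is at least ~50% free. A slower traced compaction speed raises that requirement, and the value never drops below the 20% reduce-memory floor.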
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
- DCHECK(space->identity() == OLD_POINTER_SPACE ||
- space->identity() == OLD_DATA_SPACE ||
- space->identity() == CODE_SPACE);
+ DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
- static const int kMaxMaxEvacuationCandidates = 1000;
int number_of_pages = space->CountTotalPages();
- int max_evacuation_candidates =
- static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
+ int area_size = space->AreaSize();
- if (FLAG_stress_compaction || FLAG_always_compact) {
- max_evacuation_candidates = kMaxMaxEvacuationCandidates;
- }
-
- class Candidate {
- public:
- Candidate() : fragmentation_(0), page_(NULL) {}
- Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
-
- int fragmentation() { return fragmentation_; }
- Page* page() { return page_; }
-
- private:
- int fragmentation_;
- Page* page_;
- };
-
- enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
-
- CompactionMode mode = COMPACT_FREE_LISTS;
-
- intptr_t reserved = number_of_pages * space->AreaSize();
- intptr_t over_reserved = reserved - space->SizeOfObjects();
- static const intptr_t kFreenessThreshold = 50;
-
- if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
- // If reduction of memory footprint was requested, we are aggressive
- // about choosing pages to free. We expect that half-empty pages
- // are easier to compact so slightly bump the limit.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates += 2;
- }
-
-
- if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
- // If over-usage is very high (more than a third of the space), we
- // try to free all mostly empty pages. We expect that almost empty
- // pages are even easier to compact so bump the limit even more.
- mode = REDUCE_MEMORY_FOOTPRINT;
- max_evacuation_candidates *= 2;
- }
-
- if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
- PrintF(
- "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
- "evacuation candidate limit: %d\n",
- static_cast<double>(over_reserved) / MB,
- static_cast<double>(reserved) / MB,
- static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
- }
-
- intptr_t estimated_release = 0;
-
- Candidate candidates[kMaxMaxEvacuationCandidates];
-
- max_evacuation_candidates =
- Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
- int count = 0;
- int fragmentation = 0;
- Candidate* least = NULL;
+ // Pairs of (live_bytes_in_page, page).
+ typedef std::pair<int, Page*> LiveBytesPagePair;
+ std::vector<LiveBytesPagePair> pages;
+ pages.reserve(number_of_pages);
PageIterator it(space);
- if (it.has_next()) it.next(); // Never compact the first page.
-
while (it.has_next()) {
Page* p = it.next();
- p->ClearEvacuationCandidate();
-
- if (FLAG_stress_compaction) {
- unsigned int counter = space->heap()->ms_count();
- uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
- if ((counter & 1) == (page_number & 1)) fragmentation = 1;
- } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
- // Don't try to release too many pages.
- if (estimated_release >= over_reserved) {
- continue;
- }
-
- intptr_t free_bytes = 0;
-
- if (!p->WasSwept()) {
- free_bytes = (p->area_size() - p->LiveBytes());
- } else {
- PagedSpace::SizeStats sizes;
- space->ObtainFreeListStatistics(p, &sizes);
- free_bytes = sizes.Total();
- }
-
- int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
-
- if (free_pct >= kFreenessThreshold) {
- estimated_release += free_bytes;
- fragmentation = free_pct;
- } else {
- fragmentation = 0;
- }
-
- if (FLAG_trace_fragmentation) {
- PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
- AllocationSpaceName(space->identity()),
- static_cast<int>(free_bytes),
- static_cast<double>(free_bytes * 100) / p->area_size(),
- (fragmentation > 0) ? "[fragmented]" : "");
- }
- } else {
- fragmentation = FreeListFragmentation(space, p);
+ if (p->NeverEvacuate()) continue;
+ if (p->IsFlagSet(Page::POPULAR_PAGE)) {
+ // This page had slots buffer overflow on previous GC, skip it.
+ p->ClearFlag(Page::POPULAR_PAGE);
+ continue;
}
+ // Invariant: Evacuation candidates are just created when marking is
+ // started. At the end of a GC all evacuation candidates are cleared and
+ // their slot buffers are released.
+ CHECK(!p->IsEvacuationCandidate());
+ CHECK(p->slots_buffer() == NULL);
+ DCHECK(p->area_size() == area_size);
+ int live_bytes =
+ p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
+ pages.push_back(std::make_pair(live_bytes, p));
+ }
- if (fragmentation != 0) {
- if (count < max_evacuation_candidates) {
- candidates[count++] = Candidate(fragmentation, p);
- } else {
- if (least == NULL) {
- for (int i = 0; i < max_evacuation_candidates; i++) {
- if (least == NULL ||
- candidates[i].fragmentation() < least->fragmentation()) {
- least = candidates + i;
- }
- }
- }
- if (least->fragmentation() < fragmentation) {
- *least = Candidate(fragmentation, p);
- least = NULL;
- }
+ int candidate_count = 0;
+ int total_live_bytes = 0;
+
+ const bool reduce_memory = heap()->ShouldReduceMemory();
+ if (FLAG_manual_evacuation_candidates_selection) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
+ AddEvacuationCandidate(p);
}
}
+ } else if (FLAG_stress_compaction) {
+ for (size_t i = 0; i < pages.size(); i++) {
+ Page* p = pages[i].second;
+ if (i % 2 == 0) {
+ candidate_count++;
+ total_live_bytes += pages[i].first;
+ AddEvacuationCandidate(p);
+ }
+ }
+ } else {
+ // The following approach determines the pages that should be evacuated.
+ //
+ // We use two conditions to decide whether a page qualifies as an evacuation
+ // candidate, or not:
+ // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
+ // between live bytes and capacity of this page (= area).
+ // * Evacuation quota: A global quota determining how many bytes should be
+ // compacted.
+ //
+ // The algorithm sorts all pages by live bytes and then iterates through
+ // them starting with the page with the most free memory, adding them to the
+ // set of evacuation candidates as long as both conditions (fragmentation
+ // and quota) hold.
+ int max_evacuated_bytes;
+ int target_fragmentation_percent;
+ ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
+ &max_evacuated_bytes);
+
+ const intptr_t free_bytes_threshold =
+ target_fragmentation_percent * (area_size / 100);
+
+ // Sort pages from the most free to the least free, then select
+ // the first n pages for evacuation such that:
+ // - the total size of evacuated objects does not exceed the specified
+ // limit.
+ // - fragmentation of (n+1)-th page does not exceed the specified limit.
+ std::sort(pages.begin(), pages.end(),
+ [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
+ return a.first < b.first;
+ });
+ for (size_t i = 0; i < pages.size(); i++) {
+ int live_bytes = pages[i].first;
+ int free_bytes = area_size - live_bytes;
+ if (FLAG_always_compact ||
+ ((free_bytes >= free_bytes_threshold) &&
+ ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
+ candidate_count++;
+ total_live_bytes += live_bytes;
+ }
+ if (FLAG_trace_fragmentation_verbose) {
+ PrintIsolate(isolate(),
+ "compaction-selection-page: space=%s free_bytes_page=%d "
+ "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
+ "sum_compaction_kb=%d "
+ "compaction_limit_kb=%d\n",
+ AllocationSpaceName(space->identity()), free_bytes / KB,
+ free_bytes_threshold / KB, target_fragmentation_percent,
+ total_live_bytes / KB, max_evacuated_bytes / KB);
+ }
+ }
+ // How many pages we will allocate for the evacuated objects
+ // in the worst case: ceil(total_live_bytes / area_size)
+ int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
+ DCHECK_LE(estimated_new_pages, candidate_count);
+ int estimated_released_pages = candidate_count - estimated_new_pages;
+ // Avoid (compact -> expand) cycles.
+ if ((estimated_released_pages == 0) && !FLAG_always_compact) {
+ candidate_count = 0;
+ }
+ for (int i = 0; i < candidate_count; i++) {
+ AddEvacuationCandidate(pages[i].second);
+ }
}
- for (int i = 0; i < count; i++) {
- AddEvacuationCandidate(candidates[i].page());
- }
-
- if (count > 0 && FLAG_trace_fragmentation) {
- PrintF("Collected %d evacuation candidates for space %s\n", count,
- AllocationSpaceName(space->identity()));
+ if (FLAG_trace_fragmentation) {
+ PrintIsolate(isolate(),
+ "compaction-selection: space=%s reduce_memory=%d pages=%d "
+ "total_live_bytes=%d\n",
+ AllocationSpaceName(space->identity()), reduce_memory,
+ candidate_count, total_live_bytes / KB);
}
}
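The selection rule above boils down to a greedy pass over pages sorted by live bytes. A free-standing sketch of that rule with simplified types, ignoring FLAG_always_compact and the manual/stress selection modes:

    #include <algorithm>
    #include <vector>

    // pages_live_bytes holds the live bytes of each page under consideration.
    // Returns how many of the most-free pages should become candidates.
    int SelectEvacuationCandidates(std::vector<int> pages_live_bytes,
                                   int area_size, int free_bytes_threshold,
                                   int max_evacuated_bytes) {
      // Most free memory (fewest live bytes) first.
      std::sort(pages_live_bytes.begin(), pages_live_bytes.end());
      int candidate_count = 0;
      int total_live_bytes = 0;
      for (int live_bytes : pages_live_bytes) {
        int free_bytes = area_size - live_bytes;
        if (free_bytes >= free_bytes_threshold &&
            total_live_bytes + live_bytes <= max_evacuated_bytes) {
          candidate_count++;
          total_live_bytes += live_bytes;
        }
      }
      // Worst case, ceil(total_live_bytes / area_size) fresh pages are needed
      // for the evacuated objects; if that would release no page, skip
      // compaction entirely to avoid compact -> expand cycles.
      int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
      if (candidate_count - estimated_new_pages == 0) candidate_count = 0;
      return candidate_count;
    }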
@@ -802,13 +834,12 @@
int npages = evacuation_candidates_.length();
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+ slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
p->ClearEvacuationCandidate();
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
}
compacting_ = false;
evacuation_candidates_.Rewind(0);
- invalidated_code_.Rewind(0);
}
DCHECK_EQ(0, evacuation_candidates_.length());
}
@@ -829,12 +860,17 @@
EnsureSweepingCompleted();
}
+ // If concurrent unmapping tasks are still running, we should wait for
+ // them here.
+ heap()->WaitUntilUnmappingOfFreeChunksCompleted();
+
// Clear marking bits if incremental marking is aborted.
- if (was_marked_incrementally_ && abort_incremental_marking_) {
- heap()->incremental_marking()->Abort();
+ if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
+ heap()->incremental_marking()->Stop();
ClearMarkbits();
AbortWeakCollections();
AbortWeakCells();
+ AbortTransitionArrays();
AbortCompaction();
was_marked_incrementally_ = false;
}
@@ -860,10 +896,21 @@
void MarkCompactCollector::Finish() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
+
+ // The hashing of weak_object_to_code_table is no longer valid.
+ heap()->weak_object_to_code_table()->Rehash(
+ heap()->isolate()->factory()->undefined_value());
+
+ // Clear the marking state of live large objects.
+ heap_->lo_space()->ClearMarkingStateOfLiveObjects();
+
#ifdef DEBUG
DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
+ heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
// The stub cache is not traversed during GC; clear the cache to
// force lazy re-initialization of it. This must be done after the
// GC, because it relies on the new address of certain old space
@@ -877,6 +924,13 @@
}
heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
+
+ if (marking_parity_ == EVEN_MARKING_PARITY) {
+ marking_parity_ = ODD_MARKING_PARITY;
+ } else {
+ DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+ marking_parity_ = EVEN_MARKING_PARITY;
+ }
}
@@ -922,15 +976,20 @@
Code* code = shared->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
+ if (Marking::IsWhite(code_mark)) {
if (FLAG_trace_code_flushing && shared->is_compiled()) {
PrintF("[code-flushing clears: ");
shared->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!shared->OptimizedCodeMapIsCleared()) {
+ shared->ClearOptimizedCodeMap();
+ }
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
+ DCHECK(Marking::IsBlack(code_mark));
candidate->set_code(code);
}
@@ -938,13 +997,13 @@
// setter did not record the slot update and we have to do that manually.
Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
- isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
- target);
+ isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
+ candidate, slot, target);
Object** shared_code_slot =
HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
isolate_->heap()->mark_compact_collector()->RecordSlot(
- shared_code_slot, shared_code_slot, *shared_code_slot);
+ shared, shared_code_slot, *shared_code_slot);
candidate = next_candidate;
}
@@ -964,18 +1023,22 @@
Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
- if (!code_mark.Get()) {
+ if (Marking::IsWhite(code_mark)) {
if (FLAG_trace_code_flushing && candidate->is_compiled()) {
PrintF("[code-flushing clears: ");
candidate->ShortPrint();
PrintF(" - age: %d]\n", code->GetAge());
}
+ // Always flush the optimized code map if there is one.
+ if (!candidate->OptimizedCodeMapIsCleared()) {
+ candidate->ClearOptimizedCodeMap();
+ }
candidate->set_code(lazy_compile);
}
Object** code_slot =
HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
- isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+ isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
*code_slot);
candidate = next_candidate;
@@ -985,54 +1048,6 @@
}
-void CodeFlusher::ProcessOptimizedCodeMaps() {
- STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
-
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- ClearNextCodeMap(holder);
-
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- int new_length = SharedFunctionInfo::kEntriesStart;
- int old_length = code_map->length();
- for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
- i += SharedFunctionInfo::kEntryLength) {
- Code* code =
- Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
- if (!Marking::MarkBitFrom(code).Get()) continue;
-
- // Move every slot in the entry.
- for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
- int dst_index = new_length++;
- Object** slot = code_map->RawFieldOfElementAt(dst_index);
- Object* object = code_map->get(i + j);
- code_map->set(dst_index, object);
- if (j == SharedFunctionInfo::kOsrAstIdOffset) {
- DCHECK(object->IsSmi());
- } else {
- DCHECK(
- Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
- isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
- *slot);
- }
- }
- }
-
- // Trim the optimized code map if entries have been removed.
- if (new_length < old_length) {
- holder->TrimOptimizedCodeMap(old_length - new_length);
- }
-
- holder = next_holder;
- }
-
- optimized_code_map_holder_head_ = NULL;
-}
-
-
void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
// Make sure previous flushing decisions are revisited.
isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
@@ -1103,79 +1118,6 @@
}
-void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
- ->get(SharedFunctionInfo::kNextMapIndex)
- ->IsUndefined());
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing abandons code-map: ");
- code_map_holder->ShortPrint();
- PrintF("]\n");
- }
-
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- if (holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- optimized_code_map_holder_head_ = next_holder;
- ClearNextCodeMap(code_map_holder);
- } else {
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
-
- if (next_holder == code_map_holder) {
- next_holder = GetNextCodeMap(code_map_holder);
- SetNextCodeMap(holder, next_holder);
- ClearNextCodeMap(code_map_holder);
- break;
- }
-
- holder = next_holder;
- }
- }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- EvictCandidate(candidate);
- candidate = next_candidate;
- }
- DCHECK(shared_function_info_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictOptimizedCodeMaps() {
- SharedFunctionInfo* holder = optimized_code_map_holder_head_;
- SharedFunctionInfo* next_holder;
- while (holder != NULL) {
- next_holder = GetNextCodeMap(holder);
- EvictOptimizedCodeMap(holder);
- holder = next_holder;
- }
- DCHECK(optimized_code_map_holder_head_ == NULL);
-}
-
-
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
Heap* heap = isolate_->heap();
@@ -1191,81 +1133,26 @@
}
-MarkCompactCollector::~MarkCompactCollector() {
- if (code_flusher_ != NULL) {
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-}
-
-
-static inline HeapObject* ShortCircuitConsString(Object** p) {
- // Optimization: If the heap object pointed to by p is a non-internalized
- // cons string whose right substring is HEAP->empty_string, update
- // it in place to its left substring. Return the updated value.
- //
- // Here we assume that if we change *p, we replace it with a heap object
- // (i.e., the left substring of a cons string is always a heap object).
- //
- // The check performed is:
- // object->IsConsString() && !object->IsInternalizedString() &&
- // (ConsString::cast(object)->second() == HEAP->empty_string())
- // except the maps for the object and its possible substrings might be
- // marked.
- HeapObject* object = HeapObject::cast(*p);
- Map* map = object->map();
- InstanceType type = map->instance_type();
- if (!IsShortcutCandidate(type)) return object;
-
- Object* second = reinterpret_cast<ConsString*>(object)->second();
- Heap* heap = map->GetHeap();
- if (second != heap->empty_string()) {
- return object;
- }
-
- // Since we don't have the object's start, it is impossible to update the
- // page dirty marks. Therefore, we only replace the string with its left
- // substring when page dirty marks do not change.
- Object* first = reinterpret_cast<ConsString*>(object)->first();
- if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
- *p = first;
- return HeapObject::cast(first);
-}
-
-
class MarkCompactMarkingVisitor
: public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
public:
- static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
- HeapObject* obj);
-
- static void ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type);
-
- template <MarkCompactMarkingVisitor::VisitorId id>
- class ObjectStatsTracker {
- public:
- static inline void Visit(Map* map, HeapObject* obj);
- };
-
static void Initialize();
- INLINE(static void VisitPointer(Heap* heap, Object** p)) {
- MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+ INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
+ MarkObjectByPointer(heap->mark_compact_collector(), object, p);
}
- INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+ INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
// Mark all objects pointed to in [start, end).
const int kMinRangeForMarkingRecursion = 64;
if (end - start >= kMinRangeForMarkingRecursion) {
- if (VisitUnmarkedObjects(heap, start, end)) return;
+ if (VisitUnmarkedObjects(heap, object, start, end)) return;
// We are close to a stack overflow, so just mark the objects.
}
MarkCompactCollector* collector = heap->mark_compact_collector();
for (Object** p = start; p < end; p++) {
- MarkObjectByPointer(collector, start, p);
+ MarkObjectByPointer(collector, object, p);
}
}
@@ -1279,7 +1166,7 @@
// Returns true if object needed marking and false otherwise.
INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (!mark_bit.Get()) {
+ if (Marking::IsWhite(mark_bit)) {
heap->mark_compact_collector()->SetMark(object, mark_bit);
return true;
}
@@ -1288,12 +1175,12 @@
// Mark object pointed to by p.
INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
- Object** anchor_slot, Object** p)) {
+ HeapObject* object, Object** p)) {
if (!(*p)->IsHeapObject()) return;
- HeapObject* object = ShortCircuitConsString(p);
- collector->RecordSlot(anchor_slot, p, object);
- MarkBit mark = Marking::MarkBitFrom(object);
- collector->MarkObject(object, mark);
+ HeapObject* target_object = HeapObject::cast(*p);
+ collector->RecordSlot(object, p, target_object);
+ MarkBit mark = Marking::MarkBitFrom(target_object);
+ collector->MarkObject(target_object, mark);
}
@@ -1316,8 +1203,8 @@
// Visit all unmarked objects pointed to by [start, end).
// Returns false if the operation fails (lack of stack space).
- INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
- Object** end)) {
+ INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
+ Object** start, Object** end)) {
// Return false if we are close to the stack limit.
StackLimitCheck check(heap->isolate());
if (check.HasOverflowed()) return false;
@@ -1327,10 +1214,10 @@
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (!o->IsHeapObject()) continue;
- collector->RecordSlot(start, p, o);
+ collector->RecordSlot(object, p, o);
HeapObject* obj = HeapObject::cast(o);
MarkBit mark = Marking::MarkBitFrom(obj);
- if (mark.Get()) continue;
+ if (Marking::IsBlackOrGrey(mark)) continue;
VisitUnmarkedObject(collector, obj);
}
return true;
@@ -1368,11 +1255,11 @@
FixedArray* data = FixedArray::cast(re->data());
Object** slot =
data->data_start() + JSRegExp::saved_code_index(is_one_byte);
- heap->mark_compact_collector()->RecordSlot(slot, slot, code);
+ heap->mark_compact_collector()->RecordSlot(data, slot, code);
// Set a number in the 0-255 range to guarantee no smi overflow.
re->SetDataAt(JSRegExp::code_index(is_one_byte),
- Smi::FromInt(heap->sweep_generation() & 0xff));
+ Smi::FromInt(heap->ms_count() & 0xff));
} else if (code->IsSmi()) {
int value = Smi::cast(code)->value();
// The regexp has not been compiled yet or there was a compilation error.
@@ -1382,7 +1269,7 @@
}
// Check if we should flush now.
- if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+ if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
re->SetDataAt(JSRegExp::code_index(is_one_byte),
Smi::FromInt(JSRegExp::kUninitializedValue));
re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
@@ -1411,131 +1298,6 @@
// Visit the fields of the RegExp, including the updated FixedArray.
VisitJSRegExp(map, object);
}
-
- static VisitorDispatchTable<Callback> non_count_table_;
-};
-
-
-void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
- FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
- FixedArraySubInstanceType dictionary_type) {
- Heap* heap = fixed_array->map()->GetHeap();
- if (fixed_array->map() != heap->fixed_cow_array_map() &&
- fixed_array->map() != heap->fixed_double_array_map() &&
- fixed_array != heap->empty_fixed_array()) {
- if (fixed_array->IsDictionary()) {
- heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
- } else {
- heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
- }
- }
-}
-
-
-void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
- MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- heap->RecordObjectStats(map->instance_type(), object_size);
- non_count_table_.GetVisitorById(id)(map, obj);
- if (obj->IsJSObject()) {
- JSObject* object = JSObject::cast(obj);
- ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
- FAST_ELEMENTS_SUB_TYPE);
- ObjectStatsCountFixedArray(object->properties(),
- DICTIONARY_PROPERTIES_SUB_TYPE,
- FAST_PROPERTIES_SUB_TYPE);
- }
-}
-
-
-template <MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
- HeapObject* obj) {
- ObjectStatsVisitBase(id, map, obj);
-}
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitMap> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- Map* map_obj = Map::cast(obj);
- DCHECK(map->instance_type() == MAP_TYPE);
- DescriptorArray* array = map_obj->instance_descriptors();
- if (map_obj->owns_descriptors() &&
- array != heap->empty_descriptor_array()) {
- int fixed_array_size = array->Size();
- heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (map_obj->HasTransitionArray()) {
- int fixed_array_size = map_obj->transitions()->Size();
- heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
- fixed_array_size);
- }
- if (map_obj->has_code_cache()) {
- CodeCache* cache = CodeCache::cast(map_obj->code_cache());
- heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
- cache->default_cache()->Size());
- if (!cache->normal_type_cache()->IsUndefined()) {
- heap->RecordFixedArraySubTypeStats(
- MAP_CODE_CACHE_SUB_TYPE,
- FixedArray::cast(cache->normal_type_cache())->Size());
- }
- }
- ObjectStatsVisitBase(kVisitMap, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitCode> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- int object_size = obj->Size();
- DCHECK(map->instance_type() == CODE_TYPE);
- Code* code_obj = Code::cast(obj);
- heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
- object_size);
- ObjectStatsVisitBase(kVisitCode, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
- if (sfi->scope_info() != heap->empty_fixed_array()) {
- heap->RecordFixedArraySubTypeStats(
- SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
- }
- ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
- }
-};
-
-
-template <>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
- MarkCompactMarkingVisitor::kVisitFixedArray> {
- public:
- static inline void Visit(Map* map, HeapObject* obj) {
- Heap* heap = map->GetHeap();
- FixedArray* fixed_array = FixedArray::cast(obj);
- if (fixed_array == heap->string_table()) {
- heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
- fixed_array->Size());
- }
- ObjectStatsVisitBase(kVisitFixedArray, map, obj);
- }
};
@@ -1545,20 +1307,11 @@
table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
if (FLAG_track_gc_object_stats) {
- // Copy the visitor table to make call-through possible.
- non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id) \
- table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
- VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
-#undef VISITOR_ID_COUNT_FUNCTION
+ ObjectStatsVisitor::Initialize(&table_);
}
}
-VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
- MarkCompactMarkingVisitor::non_count_table_;
-
-
class CodeMarkingVisitor : public ThreadVisitor {
public:
explicit CodeMarkingVisitor(MarkCompactCollector* collector)
@@ -1578,11 +1331,11 @@
explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
: collector_(collector) {}
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) VisitPointer(p);
}
- void VisitPointer(Object** slot) {
+ void VisitPointer(Object** slot) override {
Object* obj = *slot;
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
@@ -1610,19 +1363,15 @@
MarkBit code_mark = Marking::MarkBitFrom(code);
MarkObject(code, code_mark);
if (frame->is_optimized()) {
- MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
- frame->LookupCode());
+ Code* optimized_code = frame->LookupCode();
+ MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
+ MarkObject(optimized_code, optimized_code_mark);
}
}
}
void MarkCompactCollector::PrepareForCodeFlushing() {
- // Enable code flushing for non-incremental cycles.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(!was_marked_incrementally_);
- }
-
// If code flushing is disabled, there is no need to prepare for it.
if (!is_code_flushing_enabled()) return;
@@ -1657,24 +1406,24 @@
explicit RootMarkingVisitor(Heap* heap)
: collector_(heap->mark_compact_collector()) {}
- void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+ void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
}
// Skip the weak next code link in a code object, which is visited in
// ProcessTopOptimizedFrame.
- void VisitNextCodeLink(Object** p) {}
+ void VisitNextCodeLink(Object** p) override {}
private:
void MarkObjectByPointer(Object** p) {
if (!(*p)->IsHeapObject()) return;
// Replace flat cons strings in place.
- HeapObject* object = ShortCircuitConsString(p);
+ HeapObject* object = HeapObject::cast(*p);
MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) return;
+ if (Marking::IsBlackOrGrey(mark_bit)) return;
Map* map = object->map();
// Mark the object.
@@ -1700,12 +1449,12 @@
public:
explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
- virtual void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
// Visit all HeapObject pointers in [start, end).
for (Object** p = start; p < end; p++) {
Object* o = *p;
if (o->IsHeapObject() &&
- !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+ Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
if (finalize_external_strings) {
DCHECK(o->IsExternalString());
heap_->FinalizeExternalString(String::cast(*p));
@@ -1738,7 +1487,9 @@
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
public:
virtual Object* RetainAs(Object* object) {
- if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
return object;
} else if (object->IsAllocationSite() &&
!(AllocationSite::cast(object)->IsZombie())) {
@@ -1759,155 +1510,260 @@
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template <class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
- MarkingDeque* marking_deque,
- T* it) {
+void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
// The caller should ensure that the marking stack is initially not full,
// so that we don't waste effort pointlessly scanning for objects.
- DCHECK(!marking_deque->IsFull());
+ DCHECK(!marking_deque()->IsFull());
- Map* filler_map = heap->one_pointer_filler_map();
+ Map* filler_map = heap()->one_pointer_filler_map();
for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
MarkBit markbit = Marking::MarkBitFrom(object);
if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
Marking::GreyToBlack(markbit);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
}
}
}
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
- MemoryChunk* p) {
- DCHECK(!marking_deque->IsFull());
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
-
- const MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
-
- MarkBit::CellType grey_objects;
- if (it.HasNext()) {
- const MarkBit::CellType next_cell = *(cell + 1);
- grey_objects = current_cell & ((current_cell >> 1) |
- (next_cell << (Bitmap::kBitsPerCell - 1)));
- } else {
- grey_objects = current_cell & (current_cell >> 1);
- }
-
- int offset = 0;
- while (grey_objects != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
- grey_objects >>= trailing_zeros;
- offset += trailing_zeros;
- MarkBit markbit(cell, 1 << offset, false);
- DCHECK(Marking::IsGrey(markbit));
- Marking::GreyToBlack(markbit);
- Address addr = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(addr);
- MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
- marking_deque->PushBlack(object);
- if (marking_deque->IsFull()) return;
- offset += 2;
- grey_objects >>= 2;
- }
-
- grey_objects >>= (Bitmap::kBitsPerCell - 1);
+void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
+ DCHECK(!marking_deque()->IsFull());
+ LiveObjectIterator<kGreyObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ MarkBit markbit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsGrey(markbit));
+ Marking::GreyToBlack(markbit);
+ PushBlack(object);
+ if (marking_deque()->IsFull()) return;
}
}
-int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
- NewSpace* new_space, NewSpacePage* p) {
- DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
- DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
- DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+class MarkCompactCollector::HeapObjectVisitor {
+ public:
+ virtual ~HeapObjectVisitor() {}
+ virtual bool Visit(HeapObject* object) = 0;
+};
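The evacuation visitors below are driven by walking a page's live (black) objects and calling Visit() on each. A minimal sketch of such a driver, with the iterator specialization assumed rather than taken from this hunk:

    // Hypothetical driver: evacuate every live object on one page.
    LiveObjectIterator<kBlackObjects> it(page);
    HeapObject* object = nullptr;
    while ((object = it.Next()) != nullptr) {
      if (!visitor->Visit(object)) return false;  // e.g. allocation failed; abort this page
    }
    return true;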
- MarkBit::CellType* cells = p->markbits()->cells();
- int survivors_size = 0;
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
+class MarkCompactCollector::EvacuateVisitorBase
+ : public MarkCompactCollector::HeapObjectVisitor {
+ public:
+ EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
+ : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
- MarkBit::CellType current_cell = *cell;
- if (current_cell == 0) continue;
+ bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
+ HeapObject** target_object) {
+ int size = object->Size();
+ AllocationAlignment alignment = object->RequiredAlignment();
+ AllocationResult allocation = target_space->AllocateRaw(size, alignment);
+ if (allocation.To(target_object)) {
+ heap_->mark_compact_collector()->MigrateObject(
+ *target_object, object, size, target_space->identity(),
+ evacuation_slots_buffer_);
+ return true;
+ }
+ return false;
+ }
- int offset = 0;
- while (current_cell != 0) {
- int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
- current_cell >>= trailing_zeros;
- offset += trailing_zeros;
- Address address = cell_base + offset * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(address);
+ protected:
+ Heap* heap_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
- int size = object->Size();
- survivors_size += size;
- Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+class MarkCompactCollector::EvacuateNewSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ static const intptr_t kLabSize = 4 * KB;
+ static const intptr_t kMaxLabObjectSize = 256;
- offset++;
- current_cell >>= 1;
+ explicit EvacuateNewSpaceVisitor(Heap* heap,
+ SlotsBuffer** evacuation_slots_buffer,
+ HashMap* local_pretenuring_feedback)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ buffer_(LocalAllocationBuffer::InvalidBuffer()),
+ space_to_allocate_(NEW_SPACE),
+ promoted_size_(0),
+ semispace_copied_size_(0),
+ local_pretenuring_feedback_(local_pretenuring_feedback) {}
- // TODO(hpayer): Refactor EvacuateObject and call this function instead.
- if (heap()->ShouldBePromoted(object->address(), size) &&
- TryPromoteObject(object, size)) {
- continue;
+ bool Visit(HeapObject* object) override {
+ heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
+ int size = object->Size();
+ HeapObject* target_object = nullptr;
+ if (heap_->ShouldBePromoted(object->address(), size) &&
+ TryEvacuateObject(heap_->old_space(), object, &target_object)) {
+ // If we end up needing more special cases, we should factor this out.
+ if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->Promote(
+ JSArrayBuffer::cast(target_object));
}
+ promoted_size_ += size;
+ return true;
+ }
+ HeapObject* target = nullptr;
+ AllocationSpace space = AllocateTargetObject(object, &target);
+ heap_->mark_compact_collector()->MigrateObject(
+ HeapObject::cast(target), object, size, space,
+ (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
+ if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
+ heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
+ }
+ semispace_copied_size_ += size;
+ return true;
+ }
- AllocationResult allocation = new_space->AllocateRaw(size);
- if (allocation.IsRetry()) {
- if (!new_space->AddFreshPage()) {
- // Shouldn't happen. We are sweeping linearly, and to-space
- // has the same number of pages as from-space, so there is
- // always room.
- UNREACHABLE();
+ intptr_t promoted_size() { return promoted_size_; }
+ intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+ private:
+ enum NewSpaceAllocationMode {
+ kNonstickyBailoutOldSpace,
+ kStickyBailoutOldSpace,
+ };
+
+ inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
+ HeapObject** target_object) {
+ const int size = old_object->Size();
+ AllocationAlignment alignment = old_object->RequiredAlignment();
+ AllocationResult allocation;
+ if (space_to_allocate_ == NEW_SPACE) {
+ if (size > kMaxLabObjectSize) {
+ allocation =
+ AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
+ } else {
+ allocation = AllocateInLab(size, alignment);
+ }
+ }
+ if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
+ allocation = AllocateInOldSpace(size, alignment);
+ }
+ bool ok = allocation.To(target_object);
+ DCHECK(ok);
+ USE(ok);
+ return space_to_allocate_;
+ }
+
+ inline bool NewLocalAllocationBuffer() {
+ AllocationResult result =
+ AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
+ LocalAllocationBuffer saved_old_buffer = buffer_;
+ buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
+ if (buffer_.IsValid()) {
+ buffer_.TryMerge(&saved_old_buffer);
+ return true;
+ }
+ return false;
+ }
+
+ inline AllocationResult AllocateInNewSpace(int size_in_bytes,
+ AllocationAlignment alignment,
+ NewSpaceAllocationMode mode) {
+ AllocationResult allocation =
+ heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!heap_->new_space()->AddFreshPageSynchronized()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
+ } else {
+ allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
+ alignment);
+ if (allocation.IsRetry()) {
+ if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
}
- allocation = new_space->AllocateRaw(size);
- DCHECK(!allocation.IsRetry());
}
- Object* target = allocation.ToObjectChecked();
-
- MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
- heap()->IncrementSemiSpaceCopiedObjectSize(size);
}
- *cells = 0;
+ return allocation;
}
- return survivors_size;
-}
+
+ inline AllocationResult AllocateInOldSpace(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation =
+ heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ FatalProcessOutOfMemory(
+ "MarkCompactCollector: semi-space copy, fallback in old gen\n");
+ }
+ return allocation;
+ }
+
+ inline AllocationResult AllocateInLab(int size_in_bytes,
+ AllocationAlignment alignment) {
+ AllocationResult allocation;
+ if (!buffer_.IsValid()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ }
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ if (!NewLocalAllocationBuffer()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
+ } else {
+ allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
+ if (allocation.IsRetry()) {
+ space_to_allocate_ = OLD_SPACE;
+ return AllocationResult::Retry(OLD_SPACE);
+ }
+ }
+ }
+ return allocation;
+ }
+
+ LocalAllocationBuffer buffer_;
+ AllocationSpace space_to_allocate_;
+ intptr_t promoted_size_;
+ intptr_t semispace_copied_size_;
+ HashMap* local_pretenuring_feedback_;
+};
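One detail worth calling out: kStickyBailoutOldSpace makes the fallback persistent. Once a LAB refill fails, space_to_allocate_ flips to OLD_SPACE and stays there, so every later object handled by this visitor is promoted rather than copied within new space. Condensed, the per-object decision above is (a restatement, not new behavior):

    // space_to_allocate_ starts as NEW_SPACE and only ever moves to OLD_SPACE.
    if (space_to_allocate_ == NEW_SPACE) {
      allocation = (size > kMaxLabObjectSize)
                       ? AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace)
                       : AllocateInLab(size, alignment);  // sticky bailout on LAB failure
    }
    if (allocation.IsRetry() || space_to_allocate_ == OLD_SPACE)
      allocation = AllocateInOldSpace(size, alignment);   // fatal OOM if this also fails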
-static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
- PagedSpace* space) {
+class MarkCompactCollector::EvacuateOldSpaceVisitor final
+ : public MarkCompactCollector::EvacuateVisitorBase {
+ public:
+ EvacuateOldSpaceVisitor(Heap* heap,
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer)
+ : EvacuateVisitorBase(heap, evacuation_slots_buffer),
+ compaction_spaces_(compaction_spaces) {}
+
+ bool Visit(HeapObject* object) override {
+ CompactionSpace* target_space = compaction_spaces_->Get(
+ Page::FromAddress(object->address())->owner()->identity());
+ HeapObject* target_object = nullptr;
+ if (TryEvacuateObject(target_space, object, &target_object)) {
+ DCHECK(object->map_word().IsForwardingAddress());
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ CompactionSpaceCollection* compaction_spaces_;
+};
+
+
+void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, p);
- if (marking_deque->IsFull()) return;
+ DiscoverGreyObjectsOnPage(p);
+ if (marking_deque()->IsFull()) return;
}
}
-static void DiscoverGreyObjectsInNewSpace(Heap* heap,
- MarkingDeque* marking_deque) {
- NewSpace* space = heap->new_space();
+void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
+ NewSpace* space = heap()->new_space();
NewSpacePageIterator it(space->bottom(), space->top());
while (it.has_next()) {
NewSpacePage* page = it.next();
- DiscoverGreyObjectsOnPage(marking_deque, page);
- if (marking_deque->IsFull()) return;
+ DiscoverGreyObjectsOnPage(page);
+ if (marking_deque()->IsFull()) return;
}
}
@@ -1917,7 +1773,7 @@
if (!o->IsHeapObject()) return false;
HeapObject* heap_object = HeapObject::cast(o);
MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
+ return Marking::IsWhite(mark);
}
@@ -1927,7 +1783,7 @@
DCHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
+ return Marking::IsWhite(mark);
}
@@ -1935,7 +1791,7 @@
StringTable* string_table = heap()->string_table();
// Mark the string table itself.
MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
- if (!string_table_mark.Get()) {
+ if (Marking::IsWhite(string_table_mark)) {
// String table could have already been marked by visiting the handles list.
SetMark(string_table, string_table_mark);
}
@@ -1959,8 +1815,6 @@
// Handle the string table specially.
MarkStringTable(visitor);
- MarkWeakObjectToCodeTable();
-
// There may be overflowed objects in the heap. Visit them now.
while (marking_deque_.overflowed()) {
RefillMarkingDeque();
@@ -1969,7 +1823,8 @@
}
-void MarkCompactCollector::MarkImplicitRefGroups() {
+void MarkCompactCollector::MarkImplicitRefGroups(
+ MarkObjectFunction mark_object) {
List<ImplicitRefGroup*>* ref_groups =
isolate()->global_handles()->implicit_ref_groups();
@@ -1987,9 +1842,7 @@
// A parent object is marked, so mark all child heap objects.
for (size_t j = 0; j < entry->length; ++j) {
if ((*children[j])->IsHeapObject()) {
- HeapObject* child = HeapObject::cast(*children[j]);
- MarkBit mark = Marking::MarkBitFrom(child);
- MarkObject(child, mark);
+ mark_object(heap(), HeapObject::cast(*children[j]));
}
}
@@ -2001,16 +1854,6 @@
}
-void MarkCompactCollector::MarkWeakObjectToCodeTable() {
- HeapObject* weak_object_to_code_table =
- HeapObject::cast(heap()->weak_object_to_code_table());
- if (!IsMarked(weak_object_to_code_table)) {
- MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
- SetMark(weak_object_to_code_table, mark);
- }
-}
-
-
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
@@ -2042,33 +1885,23 @@
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
+ isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
DCHECK(marking_deque_.overflowed());
- DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
+ DiscoverGreyObjectsInNewSpace();
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
- heap()->old_pointer_space());
+ DiscoverGreyObjectsInSpace(heap()->old_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
+ DiscoverGreyObjectsInSpace(heap()->code_space());
if (marking_deque_.IsFull()) return;
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
- if (marking_deque_.IsFull()) return;
-
- DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
- heap()->property_cell_space());
+ DiscoverGreyObjectsInSpace(heap()->map_space());
if (marking_deque_.IsFull()) return;
LargeObjectIterator lo_it(heap()->lo_space());
- DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
+ DiscoverGreyObjectsWithIterator(&lo_it);
if (marking_deque_.IsFull()) return;
marking_deque_.ClearOverflowed();
@@ -2098,7 +1931,7 @@
if (!only_process_harmony_weak_collections) {
isolate()->global_handles()->IterateObjectGroups(
visitor, &IsUnmarkedHeapObjectWithHeap);
- MarkImplicitRefGroups();
+ MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
}
ProcessWeakCollections();
work_to_do = !marking_deque_.IsEmpty();
@@ -2116,7 +1949,7 @@
if (it.frame()->type() == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
if (!code->CanDeoptAt(it.frame()->pc())) {
- code->CodeIterateBody(visitor);
+ Code::BodyDescriptor::IterateBody(code, visitor);
}
ProcessMarkingDeque();
return;
@@ -2125,40 +1958,84 @@
}
-void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
+ DCHECK(!marking_deque_.in_use());
if (marking_deque_memory_ == NULL) {
- marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+ marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
+ marking_deque_memory_committed_ = 0;
}
- if (!marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Commit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size(),
+ if (marking_deque_memory_ == NULL) {
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
+ }
+}
+
+
+void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
+  // Commit the requested amount of memory for the marking deque. If the
+  // commit fails, retry with progressively smaller sizes, halving down to
+  // kMinMarkingDequeSize, before giving up.
+ CHECK(!marking_deque_.in_use());
+ for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
+ base::VirtualMemory* memory = marking_deque_memory_;
+ size_t currently_committed = marking_deque_memory_committed_;
+
+ if (currently_committed == size) return;
+
+ if (currently_committed > size) {
+ bool success = marking_deque_memory_->Uncommit(
+ reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
+ currently_committed - size);
+ if (success) {
+ marking_deque_memory_committed_ = size;
+ return;
+ }
+ UNREACHABLE();
+ }
+
+ bool success = memory->Commit(
+ reinterpret_cast<Address>(memory->address()) + currently_committed,
+ size - currently_committed,
false); // Not executable.
- CHECK(success);
- marking_deque_memory_committed_ = true;
- InitializeMarkingDeque();
+ if (success) {
+ marking_deque_memory_committed_ = size;
+ return;
+ }
}
+ V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
}
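Worked example, assuming kMaxMarkingDequeSize is 4 MB and kMinMarkingDequeSize is 256 KB (both constants live in mark-compact.h, not in this hunk): a failing commit retries at 4 MB, 2 MB, 1 MB, 512 KB and 256 KB, and only then calls FatalProcessOutOfMemory.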
void MarkCompactCollector::InitializeMarkingDeque() {
- if (marking_deque_memory_committed_) {
- Address addr = static_cast<Address>(marking_deque_memory_->address());
- size_t size = marking_deque_memory_->size();
- if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
- marking_deque_.Initialize(addr, addr + size);
- }
+ DCHECK(!marking_deque_.in_use());
+ DCHECK(marking_deque_memory_committed_ > 0);
+ Address addr = static_cast<Address>(marking_deque_memory_->address());
+ size_t size = marking_deque_memory_committed_;
+ if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+ marking_deque_.Initialize(addr, addr + size);
}
-void MarkCompactCollector::UncommitMarkingDeque() {
- if (marking_deque_memory_committed_) {
- bool success = marking_deque_memory_->Uncommit(
- reinterpret_cast<Address>(marking_deque_memory_->address()),
- marking_deque_memory_->size());
- CHECK(success);
- marking_deque_memory_committed_ = false;
+void MarkingDeque::Initialize(Address low, Address high) {
+ DCHECK(!in_use_);
+ HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+ HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+ array_ = obj_low;
+ mask_ = base::bits::RoundDownToPowerOfTwo32(
+ static_cast<uint32_t>(obj_high - obj_low)) -
+ 1;
+ top_ = bottom_ = 0;
+ overflowed_ = false;
+ in_use_ = true;
+}
+
+
+void MarkingDeque::Uninitialize(bool aborting) {
+ if (!aborting) {
+ DCHECK(IsEmpty());
+ DCHECK(!overflowed_);
}
+ DCHECK(in_use_);
+ top_ = bottom_ = 0xdecbad;
+ in_use_ = false;
}
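Rounding the element count down to a power of two lets the deque wrap indices with a bit mask instead of a modulo. A sketch of the arithmetic this enables (illustrative; the real push/pop members live in mark-compact.h):

    // With capacity a power of two and mask_ = capacity - 1:
    //   next_top = (top_ + 1) & mask_;          // cheap wrap-around, no '%'
    //   full  when ((top_ + 1) & mask_) == bottom_
    //   empty when top_ == bottom_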
@@ -2166,20 +2043,26 @@
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
// The recursive GC marker detects when it is nearing stack overflow,
// and switches to a different marking system. JS interrupts interfere
// with the C stack limit check.
PostponeInterruptsScope postpone(isolate());
- IncrementalMarking* incremental_marking = heap_->incremental_marking();
- if (was_marked_incrementally_) {
- incremental_marking->Finalize();
- } else {
- // Abort any pending incremental activities e.g. incremental sweeping.
- incremental_marking->Abort();
- InitializeMarkingDeque();
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
+ IncrementalMarking* incremental_marking = heap_->incremental_marking();
+ if (was_marked_incrementally_) {
+ incremental_marking->Finalize();
+ } else {
+      // Abort any pending incremental activities, e.g. incremental sweeping.
+ incremental_marking->Stop();
+ if (marking_deque_.in_use()) {
+ marking_deque_.Uninitialize(true);
+ }
+ }
}
#ifdef DEBUG
@@ -2187,45 +2070,26 @@
state_ = MARK_LIVE_OBJECTS;
#endif
- EnsureMarkingDequeIsCommittedAndInitialize();
+ EnsureMarkingDequeIsCommittedAndInitialize(
+ MarkCompactCollector::kMaxMarkingDequeSize);
- PrepareForCodeFlushing();
-
- if (was_marked_incrementally_) {
- // There is no write barrier on cells so we have to scan them now at the end
- // of the incremental marking.
- {
- HeapObjectIterator cell_iterator(heap()->cell_space());
- HeapObject* cell;
- while ((cell = cell_iterator.Next()) != NULL) {
- DCHECK(cell->IsCell());
- if (IsMarked(cell)) {
- int offset = Cell::kValueOffset;
- MarkCompactMarkingVisitor::VisitPointer(
- heap(), reinterpret_cast<Object**>(cell->address() + offset));
- }
- }
- }
- {
- HeapObjectIterator js_global_property_cell_iterator(
- heap()->property_cell_space());
- HeapObject* cell;
- while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
- DCHECK(cell->IsPropertyCell());
- if (IsMarked(cell)) {
- MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
- }
- }
- }
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
+ PrepareForCodeFlushing();
}
RootMarkingVisitor root_visitor(heap());
- MarkRoots(&root_visitor);
-
- ProcessTopOptimizedFrame(&root_visitor);
{
- GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
+ MarkRoots(&root_visitor);
+ ProcessTopOptimizedFrame(&root_visitor);
+ }
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
@@ -2253,274 +2117,254 @@
ProcessEphemeralMarking(&root_visitor, true);
}
- AfterMarking();
-
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+ heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
+ start_time);
}
-}
-
-
-void MarkCompactCollector::AfterMarking() {
- // Prune the string table removing all strings only pointed to by the
- // string table. Cannot use string_table() here because the string
- // table is marked.
- StringTable* string_table = heap()->string_table();
- InternalizedStringTableCleaner internalized_visitor(heap());
- string_table->IterateElements(&internalized_visitor);
- string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
-
- ExternalStringTableCleaner external_visitor(heap());
- heap()->external_string_table_.Iterate(&external_visitor);
- heap()->external_string_table_.CleanUp();
-
- // Process the weak references.
- MarkCompactWeakObjectRetainer mark_compact_object_retainer;
- heap()->ProcessWeakReferences(&mark_compact_object_retainer);
-
- // Remove object groups after marking phase.
- heap()->isolate()->global_handles()->RemoveObjectGroups();
- heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-
- // Flush code from collected candidates.
- if (is_code_flushing_enabled()) {
- code_flusher_->ProcessCandidates();
- // If incremental marker does not support code flushing, we need to
- // disable it before incremental marking steps for next cycle.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(false);
- }
- }
-
if (FLAG_track_gc_object_stats) {
- heap()->CheckpointObjectStats();
+ if (FLAG_trace_gc_object_stats) {
+ heap()->object_stats_->TraceObjectStats();
+ }
+ heap()->object_stats_->CheckpointObjectStats();
}
}
void MarkCompactCollector::ClearNonLiveReferences() {
- // Iterate over the map space, setting map transitions that go from
- // a marked map to an unmarked map to null transitions. This action
- // is carried out only on maps of JSObjects and related subtypes.
- HeapObjectIterator map_iterator(heap()->map_space());
- for (HeapObject* obj = map_iterator.Next(); obj != NULL;
- obj = map_iterator.Next()) {
- Map* map = Map::cast(obj);
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
- if (!map->CanTransition()) continue;
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_STRING_TABLE);
- MarkBit map_mark = Marking::MarkBitFrom(map);
- ClearNonLivePrototypeTransitions(map);
- ClearNonLiveMapTransitions(map, map_mark);
+ // Prune the string table removing all strings only pointed to by the
+ // string table. Cannot use string_table() here because the string
+ // table is marked.
+ StringTable* string_table = heap()->string_table();
+ InternalizedStringTableCleaner internalized_visitor(heap());
+ string_table->IterateElements(&internalized_visitor);
+ string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
- if (map_mark.Get()) {
- ClearNonLiveDependentCode(map->dependent_code());
- } else {
- ClearDependentCode(map->dependent_code());
- map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
- }
+ ExternalStringTableCleaner external_visitor(heap());
+ heap()->external_string_table_.Iterate(&external_visitor);
+ heap()->external_string_table_.CleanUp();
}
- // Iterate over property cell space, removing dependent code that is not
- // otherwise kept alive by strong references.
- HeapObjectIterator cell_iterator(heap_->property_cell_space());
- for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
- cell = cell_iterator.Next()) {
- if (IsMarked(cell)) {
- ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
- }
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
+ // Process the weak references.
+ MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+ heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
}
- // Iterate over allocation sites, removing dependent code that is not
- // otherwise kept alive by strong references.
- Object* undefined = heap()->undefined_value();
- for (Object* site = heap()->allocation_sites_list(); site != undefined;
- site = AllocationSite::cast(site)->weak_next()) {
- if (IsMarked(site)) {
- ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
- }
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
+
+ // Remove object groups after marking phase.
+ heap()->isolate()->global_handles()->RemoveObjectGroups();
+ heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
}
- if (heap_->weak_object_to_code_table()->IsHashTable()) {
- WeakHashTable* table =
- WeakHashTable::cast(heap_->weak_object_to_code_table());
- uint32_t capacity = table->Capacity();
- for (uint32_t i = 0; i < capacity; i++) {
- uint32_t key_index = table->EntryToIndex(i);
- Object* key = table->get(key_index);
- if (!table->IsKey(key)) continue;
- uint32_t value_index = table->EntryToValueIndex(i);
- Object* value = table->get(value_index);
- if (key->IsCell() && !IsMarked(key)) {
- Cell* cell = Cell::cast(key);
- Object* object = cell->value();
- if (IsMarked(object)) {
- MarkBit mark = Marking::MarkBitFrom(cell);
- SetMark(cell, mark);
- Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
- RecordSlot(value_slot, value_slot, *value_slot);
- }
- }
- if (IsMarked(key)) {
- if (!IsMarked(value)) {
- HeapObject* obj = HeapObject::cast(value);
- MarkBit mark = Marking::MarkBitFrom(obj);
- SetMark(obj, mark);
- }
- ClearNonLiveDependentCode(DependentCode::cast(value));
- } else {
- ClearDependentCode(DependentCode::cast(value));
- table->set(key_index, heap_->the_hole_value());
- table->set(value_index, heap_->the_hole_value());
- table->ElementRemoved();
+ // Flush code from collected candidates.
+ if (is_code_flushing_enabled()) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
+ code_flusher_->ProcessCandidates();
+ }
+
+ DependentCode* dependent_code_list;
+ Object* non_live_map_list;
+ ClearWeakCells(&non_live_map_list, &dependent_code_list);
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
+ ClearSimpleMapTransitions(non_live_map_list);
+ ClearFullMapTransitions();
+ }
+
+ MarkDependentCodeForDeoptimization(dependent_code_list);
+
+ ClearWeakCollections();
+
+ ClearInvalidStoreAndSlotsBufferEntries();
+}
+
+
+void MarkCompactCollector::MarkDependentCodeForDeoptimization(
+ DependentCode* list_head) {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
+ Isolate* isolate = this->isolate();
+ DependentCode* current = list_head;
+ while (current->length() > 0) {
+ have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
+ isolate, DependentCode::kWeakCodeGroup);
+ current = current->next_link();
+ }
+
+ WeakHashTable* table = heap_->weak_object_to_code_table();
+ uint32_t capacity = table->Capacity();
+ for (uint32_t i = 0; i < capacity; i++) {
+ uint32_t key_index = table->EntryToIndex(i);
+ Object* key = table->get(key_index);
+ if (!table->IsKey(key)) continue;
+ uint32_t value_index = table->EntryToValueIndex(i);
+ Object* value = table->get(value_index);
+ DCHECK(key->IsWeakCell());
+ if (WeakCell::cast(key)->cleared()) {
+ have_code_to_deoptimize_ |=
+ DependentCode::cast(value)->MarkCodeForDeoptimization(
+ isolate, DependentCode::kWeakCodeGroup);
+ table->set(key_index, heap_->the_hole_value());
+ table->set(value_index, heap_->the_hole_value());
+ table->ElementRemoved();
+ }
+ }
+}
+
+
+void MarkCompactCollector::ClearSimpleMapTransitions(
+ Object* non_live_map_list) {
+ Object* the_hole_value = heap()->the_hole_value();
+ Object* weak_cell_obj = non_live_map_list;
+ while (weak_cell_obj != Smi::FromInt(0)) {
+ WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
+ Map* map = Map::cast(weak_cell->value());
+ DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
+ Object* potential_parent = map->constructor_or_backpointer();
+ if (potential_parent->IsMap()) {
+ Map* parent = Map::cast(potential_parent);
+ if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
+ parent->raw_transitions() == weak_cell) {
+ ClearSimpleMapTransition(parent, map);
}
}
+ weak_cell->clear();
+ weak_cell_obj = weak_cell->next();
+ weak_cell->clear_next(the_hole_value);
}
}
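The list walked above is a singly linked chain of WeakCells threaded through their next() fields and terminated by Smi::FromInt(0); the same convention is used for encountered_weak_cells and encountered_transition_arrays elsewhere in this patch. A generic traversal sketch (names illustrative), reading the link before severing it exactly as the loop above does:

    for (Object* o = list_head; o != Smi::FromInt(0);) {
      WeakCell* cell = WeakCell::cast(o);
      o = cell->next();                   // read the link first...
      cell->clear_next(the_hole_value);   // ...then it is safe to sever it
    }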
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
- int number_of_transitions = map->NumberOfProtoTransitions();
- FixedArray* prototype_transitions = map->GetPrototypeTransitions();
-
- int new_number_of_transitions = 0;
- const int header = Map::kProtoTransitionHeaderSize;
- const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
- const int map_offset = header + Map::kProtoTransitionMapOffset;
- const int step = Map::kProtoTransitionElementsPerEntry;
- for (int i = 0; i < number_of_transitions; i++) {
- Object* prototype = prototype_transitions->get(proto_offset + i * step);
- Object* cached_map = prototype_transitions->get(map_offset + i * step);
- if (IsMarked(prototype) && IsMarked(cached_map)) {
- DCHECK(!prototype->IsUndefined());
- int proto_index = proto_offset + new_number_of_transitions * step;
- int map_index = map_offset + new_number_of_transitions * step;
- if (new_number_of_transitions != i) {
- prototype_transitions->set(proto_index, prototype,
- UPDATE_WRITE_BARRIER);
- prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
- }
- Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
- RecordSlot(slot, slot, prototype);
- new_number_of_transitions++;
- }
- }
-
- if (new_number_of_transitions != number_of_transitions) {
- map->SetNumberOfProtoTransitions(new_number_of_transitions);
- }
-
- // Fill slots that became free with undefined value.
- for (int i = new_number_of_transitions * step;
- i < number_of_transitions * step; i++) {
- prototype_transitions->set_undefined(header + i);
- }
-}
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
- MarkBit map_mark) {
- Object* potential_parent = map->GetBackPointer();
- if (!potential_parent->IsMap()) return;
- Map* parent = Map::cast(potential_parent);
-
- // Follow back pointer, check whether we are dealing with a map transition
- // from a live map to a dead path and in case clear transitions of parent.
- bool current_is_alive = map_mark.Get();
- bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
- if (!current_is_alive && parent_is_alive) {
- ClearMapTransitions(parent);
- }
-}
-
-
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
- if (Marking::MarkBitFrom(target).Get()) return false;
- target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
- return true;
-}
-
-
-void MarkCompactCollector::ClearMapTransitions(Map* map) {
- // If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (!map->HasTransitionArray()) return;
-
- TransitionArray* t = map->transitions();
-
- int transition_index = 0;
-
+void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
+ Map* dead_transition) {
+ // A previously existing simple transition (stored in a WeakCell) is going
+ // to be cleared. Clear the useless cell pointer, and take ownership
+ // of the descriptor array.
+ map->set_raw_transitions(Smi::FromInt(0));
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
DescriptorArray* descriptors = map->instance_descriptors();
- bool descriptors_owner_died = false;
+ if (descriptors == dead_transition->instance_descriptors() &&
+ number_of_own_descriptors > 0) {
+ TrimDescriptorArray(map, descriptors);
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ map->set_owns_descriptors(true);
+ }
+}
- // Compact all live descriptors to the left.
- for (int i = 0; i < t->number_of_transitions(); ++i) {
- Map* target = t->GetTarget(i);
- if (ClearMapBackPointer(target)) {
- if (target->instance_descriptors() == descriptors) {
+
+void MarkCompactCollector::ClearFullMapTransitions() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ int num_transitions = array->number_of_entries();
+ DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
+ if (num_transitions > 0) {
+ Map* map = array->GetTarget(0);
+ Map* parent = Map::cast(map->constructor_or_backpointer());
+ bool parent_is_alive =
+ Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
+ DescriptorArray* descriptors =
+ parent_is_alive ? parent->instance_descriptors() : nullptr;
+ bool descriptors_owner_died =
+ CompactTransitionArray(parent, array, descriptors);
+ if (descriptors_owner_died) {
+ TrimDescriptorArray(parent, descriptors);
+ }
+ }
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
+bool MarkCompactCollector::CompactTransitionArray(
+ Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
+ int num_transitions = transitions->number_of_entries();
+ bool descriptors_owner_died = false;
+ int transition_index = 0;
+ // Compact all live transitions to the left.
+ for (int i = 0; i < num_transitions; ++i) {
+ Map* target = transitions->GetTarget(i);
+ DCHECK_EQ(target->constructor_or_backpointer(), map);
+ if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
+ if (descriptors != nullptr &&
+ target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
- Name* key = t->GetKey(i);
- t->SetKey(transition_index, key);
- Object** key_slot = t->GetKeySlot(transition_index);
- RecordSlot(key_slot, key_slot, key);
+ Name* key = transitions->GetKey(i);
+ transitions->SetKey(transition_index, key);
+ Object** key_slot = transitions->GetKeySlot(transition_index);
+ RecordSlot(transitions, key_slot, key);
// Target slots do not need to be recorded since maps are not compacted.
- t->SetTarget(transition_index, t->GetTarget(i));
+ transitions->SetTarget(transition_index, transitions->GetTarget(i));
}
transition_index++;
}
}
-
// If there are no transitions to be cleared, return.
- // TODO(verwaest) Should be an assert, otherwise back pointers are not
- // properly cleared.
- if (transition_index == t->number_of_transitions()) return;
-
- int number_of_own_descriptors = map->NumberOfOwnDescriptors();
-
- if (descriptors_owner_died) {
- if (number_of_own_descriptors > 0) {
- TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
- DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
- map->set_owns_descriptors(true);
- } else {
- DCHECK(descriptors == heap_->empty_descriptor_array());
- }
+ if (transition_index == num_transitions) {
+ DCHECK(!descriptors_owner_died);
+ return false;
}
-
// Note that we never eliminate a transition array, though we might right-trim
// such that number_of_transitions() == 0. If this assumption changes,
// TransitionArray::Insert() will need to deal with the case that a transition
// array disappeared during GC.
- int trim = t->number_of_transitions_storage() - transition_index;
+ int trim = TransitionArray::Capacity(transitions) - transition_index;
if (trim > 0) {
- heap_->RightTrimFixedArray<Heap::FROM_GC>(
- t, t->IsSimpleTransition() ? trim
- : trim * TransitionArray::kTransitionSize);
- t->SetNumberOfTransitions(transition_index);
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ transitions, trim * TransitionArray::kTransitionSize);
+ transitions->SetNumberOfTransitions(transition_index);
}
- DCHECK(map->HasTransitionArray());
+ return descriptors_owner_died;
}
void MarkCompactCollector::TrimDescriptorArray(Map* map,
- DescriptorArray* descriptors,
- int number_of_own_descriptors) {
+ DescriptorArray* descriptors) {
+ int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+ if (number_of_own_descriptors == 0) {
+ DCHECK(descriptors == heap_->empty_descriptor_array());
+ return;
+ }
+
int number_of_descriptors = descriptors->number_of_descriptors_storage();
int to_trim = number_of_descriptors - number_of_own_descriptors;
- if (to_trim == 0) return;
+ if (to_trim > 0) {
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ descriptors, to_trim * DescriptorArray::kDescriptorSize);
+ descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
- heap_->RightTrimFixedArray<Heap::FROM_GC>(
- descriptors, to_trim * DescriptorArray::kDescriptorSize);
- descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+ if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+ descriptors->Sort();
- if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
- descriptors->Sort();
+ if (FLAG_unbox_double_fields) {
+ LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+ layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
+ number_of_own_descriptors);
+ SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
+ }
+ }
+ DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+ map->set_owns_descriptors(true);
}
@@ -2528,7 +2372,8 @@
DescriptorArray* descriptors) {
int live_enum = map->EnumLength();
if (live_enum == kInvalidEnumCacheSentinel) {
- live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+ live_enum =
+ map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
}
if (live_enum == 0) return descriptors->ClearEnumCache();
@@ -2536,82 +2381,17 @@
int to_trim = enum_cache->length() - live_enum;
if (to_trim <= 0) return;
- heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
- to_trim);
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
+ descriptors->GetEnumCache(), to_trim);
if (!descriptors->HasEnumIndicesCache()) return;
FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
- heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
-}
-
-
-void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
- DisallowHeapAllocation no_allocation;
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int g = DependentCode::kWeakCodeGroup;
- for (int i = starts.at(g); i < starts.at(g + 1); i++) {
- // If the entry is compilation info then the map must be alive,
- // and ClearDependentCode shouldn't be called.
- DCHECK(entries->is_code_at(i));
- Code* code = entries->code_at(i);
- if (IsMarked(code) && !code->marked_for_deoptimization()) {
- DependentCode::SetMarkedForDeoptimization(
- code, static_cast<DependentCode::DependencyGroup>(g));
- code->InvalidateEmbeddedObjects();
- have_code_to_deoptimize_ = true;
- }
- }
- for (int i = 0; i < number_of_entries; i++) {
- entries->clear_at(i);
- }
-}
-
-
-int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
- DependentCode* entries, int group, int start, int end, int new_start) {
- int survived = 0;
- for (int i = start; i < end; i++) {
- Object* obj = entries->object_at(i);
- DCHECK(obj->IsCode() || IsMarked(obj));
- if (IsMarked(obj) &&
- (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
- if (new_start + survived != i) {
- entries->set_object_at(new_start + survived, obj);
- }
- Object** slot = entries->slot_at(new_start + survived);
- RecordSlot(slot, slot, obj);
- survived++;
- }
- }
- entries->set_number_of_entries(
- static_cast<DependentCode::DependencyGroup>(group), survived);
- return survived;
-}
-
-
-void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
- DisallowHeapAllocation no_allocation;
- DependentCode::GroupStartIndexes starts(entries);
- int number_of_entries = starts.number_of_entries();
- if (number_of_entries == 0) return;
- int new_number_of_entries = 0;
- // Go through all groups, remove dead codes and compact.
- for (int g = 0; g < DependentCode::kGroupCount; g++) {
- int survived = ClearNonLiveDependentCodeInGroup(
- entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
- new_number_of_entries += survived;
- }
- for (int i = new_number_of_entries; i < number_of_entries; i++) {
- entries->clear_at(i);
- }
+ heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
+ to_trim);
}
void MarkCompactCollector::ProcessWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2619,15 +2399,14 @@
DCHECK(MarkCompactCollector::IsMarked(weak_collection));
if (weak_collection->table()->IsHashTable()) {
ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
- Object** anchor = reinterpret_cast<Object**>(table->address());
for (int i = 0; i < table->Capacity(); i++) {
if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
Object** key_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
- RecordSlot(anchor, key_slot, *key_slot);
+ RecordSlot(table, key_slot, *key_slot);
Object** value_slot =
table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
- MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+ MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
value_slot);
}
}
@@ -2639,7 +2418,7 @@
void MarkCompactCollector::ClearWeakCollections() {
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+ GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2662,8 +2441,6 @@
void MarkCompactCollector::AbortWeakCollections() {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
Object* weak_collection_obj = heap()->encountered_weak_collections();
while (weak_collection_obj != Smi::FromInt(0)) {
JSWeakCollection* weak_collection =
@@ -2675,50 +2452,225 @@
}
-void MarkCompactCollector::ProcessAndClearWeakCells() {
- HeapObject* undefined = heap()->undefined_value();
- Object* weak_cell_obj = heap()->encountered_weak_cells();
+void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
+ DependentCode** dependent_code_list) {
+ Heap* heap = this->heap();
+ GCTracer::Scope gc_scope(heap->tracer(),
+ GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
+ Object* weak_cell_obj = heap->encountered_weak_cells();
+ Object* the_hole_value = heap->the_hole_value();
+ DependentCode* dependent_code_head =
+ DependentCode::cast(heap->empty_fixed_array());
+ Object* non_live_map_head = Smi::FromInt(0);
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
+ Object* next_weak_cell = weak_cell->next();
+ bool clear_value = true;
+ bool clear_next = true;
// We do not insert cleared weak cells into the list, so the value
// cannot be a Smi here.
HeapObject* value = HeapObject::cast(weak_cell->value());
if (!MarkCompactCollector::IsMarked(value)) {
- weak_cell->clear();
+ // Cells for new-space objects embedded in optimized code are wrapped in
+ // WeakCell and put into Heap::weak_object_to_code_table.
+ // Such cells do not have any strong references but we want to keep them
+ // alive as long as the cell value is alive.
+ // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
+ if (value->IsCell()) {
+ Object* cell_value = Cell::cast(value)->value();
+ if (cell_value->IsHeapObject() &&
+ MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
+ // Resurrect the cell.
+ MarkBit mark = Marking::MarkBitFrom(value);
+ SetMark(value, mark);
+ Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
+ RecordSlot(value, slot, *slot);
+ slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
+ RecordSlot(weak_cell, slot, *slot);
+ clear_value = false;
+ }
+ }
+ if (value->IsMap()) {
+ // The map is non-live.
+ Map* map = Map::cast(value);
+ // Add dependent code to the dependent_code_list.
+ DependentCode* candidate = map->dependent_code();
+ // We rely on the fact that the weak code group comes first.
+ STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
+ if (candidate->length() > 0 &&
+ candidate->group() == DependentCode::kWeakCodeGroup) {
+ candidate->set_next_link(dependent_code_head);
+ dependent_code_head = candidate;
+ }
+ // Add the weak cell to the non_live_map list.
+ weak_cell->set_next(non_live_map_head);
+ non_live_map_head = weak_cell;
+ clear_value = false;
+ clear_next = false;
+ }
} else {
+ // The value of the weak cell is alive.
Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
- heap()->mark_compact_collector()->RecordSlot(slot, slot, value);
+ RecordSlot(weak_cell, slot, *slot);
+ clear_value = false;
}
- weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ if (clear_value) {
+ weak_cell->clear();
+ }
+ if (clear_next) {
+ weak_cell->clear_next(the_hole_value);
+ }
+ weak_cell_obj = next_weak_cell;
}
- heap()->set_encountered_weak_cells(Smi::FromInt(0));
+ heap->set_encountered_weak_cells(Smi::FromInt(0));
+ *non_live_map_list = non_live_map_head;
+ *dependent_code_list = dependent_code_head;
}
void MarkCompactCollector::AbortWeakCells() {
- Object* undefined = heap()->undefined_value();
+ Object* the_hole_value = heap()->the_hole_value();
Object* weak_cell_obj = heap()->encountered_weak_cells();
while (weak_cell_obj != Smi::FromInt(0)) {
WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
weak_cell_obj = weak_cell->next();
- weak_cell->set_next(undefined, SKIP_WRITE_BARRIER);
+ weak_cell->clear_next(the_hole_value);
}
heap()->set_encountered_weak_cells(Smi::FromInt(0));
}
-void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+void MarkCompactCollector::AbortTransitionArrays() {
+ HeapObject* undefined = heap()->undefined_value();
+ Object* obj = heap()->encountered_transition_arrays();
+ while (obj != Smi::FromInt(0)) {
+ TransitionArray* array = TransitionArray::cast(obj);
+ obj = array->next_link();
+ array->set_next_link(undefined, SKIP_WRITE_BARRIER);
+ }
+ heap()->set_encountered_transition_arrays(Smi::FromInt(0));
+}
+
+
+void MarkCompactCollector::RecordMigratedSlot(
+ Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
+  // When parallel compaction is in progress, store buffer and slots buffer
+  // entries require synchronization.
if (heap_->InNewSpace(value)) {
- heap_->store_buffer()->Mark(slot);
+ if (compaction_in_progress_) {
+ heap_->store_buffer()->MarkSynchronized(slot);
+ } else {
+ heap_->store_buffer()->Mark(slot);
+ }
} else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
reinterpret_cast<Object**>(slot),
SlotsBuffer::IGNORE_OVERFLOW);
}
}
+void MarkCompactCollector::RecordMigratedCodeEntrySlot(
+ Address code_entry, Address code_entry_slot,
+ SlotsBuffer** evacuation_slots_buffer) {
+ if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+ SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+ SlotsBuffer::IGNORE_OVERFLOW);
+ }
+}
+
+
+void MarkCompactCollector::RecordMigratedCodeObjectSlot(
+ Address code_object, SlotsBuffer** evacuation_slots_buffer) {
+ SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
+ SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
+ SlotsBuffer::IGNORE_OVERFLOW);
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ return SlotsBuffer::CODE_TARGET_SLOT;
+ } else if (RelocInfo::IsCell(rmode)) {
+ return SlotsBuffer::CELL_TARGET_SLOT;
+ } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+ return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+ } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+ return SlotsBuffer::DEBUG_TARGET_SLOT;
+ }
+ UNREACHABLE();
+ return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+ SlotsBuffer::ObjectSlot slot) {
+ return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+ Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+ RelocInfo::Mode rmode = rinfo->rmode();
+ if (target_page->IsEvacuationCandidate() &&
+ (rinfo->host() == NULL ||
+ !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+ Address addr = rinfo->pc();
+ SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
+ if (rinfo->IsInConstantPool()) {
+ addr = rinfo->constant_pool_entry_address();
+ if (RelocInfo::IsCodeTarget(rmode)) {
+ slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
+ } else {
+ DCHECK(RelocInfo::IsEmbeddedObject(rmode));
+ slot_type = SlotsBuffer::OBJECT_SLOT;
+ }
+ }
+ bool success = SlotsBuffer::AddTo(
+ slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
+ addr, SlotsBuffer::FAIL_ON_OVERFLOW);
+ if (!success) {
+ EvictPopularEvacuationCandidate(target_page);
+ }
+ }
+}
+
+
+class RecordMigratedSlotVisitor final : public ObjectVisitor {
+ public:
+ RecordMigratedSlotVisitor(MarkCompactCollector* collector,
+ SlotsBuffer** evacuation_slots_buffer)
+ : collector_(collector),
+ evacuation_slots_buffer_(evacuation_slots_buffer) {}
+
+ V8_INLINE void VisitPointer(Object** p) override {
+ collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
+ evacuation_slots_buffer_);
+ }
+
+ V8_INLINE void VisitPointers(Object** start, Object** end) override {
+ while (start < end) {
+ collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
+ evacuation_slots_buffer_);
+ ++start;
+ }
+ }
+
+ V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
+ if (collector_->compacting_) {
+ Address code_entry = Memory::Address_at(code_entry_slot);
+ collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
+ evacuation_slots_buffer_);
+ }
+ }
+
+ private:
+ MarkCompactCollector* collector_;
+ SlotsBuffer** evacuation_slots_buffer_;
+};
+
+
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
@@ -2733,85 +2685,32 @@
// pointer iteration. This is an issue if the store buffer overflows and we
// have to scan the entire old space, including dead objects, looking for
// pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
- int size, AllocationSpace dest) {
+void MarkCompactCollector::MigrateObject(
+ HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
+ SlotsBuffer** evacuation_slots_buffer) {
Address dst_addr = dst->address();
Address src_addr = src->address();
DCHECK(heap()->AllowedToBeMigrated(src, dest));
- DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
- if (dest == OLD_POINTER_SPACE) {
- Address src_slot = src_addr;
- Address dst_slot = dst_addr;
+ DCHECK(dest != LO_SPACE);
+ if (dest == OLD_SPACE) {
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(evacuation_slots_buffer != nullptr);
DCHECK(IsAligned(size, kPointerSize));
- bool may_contain_raw_values = src->MayContainRawValues();
-#if V8_DOUBLE_FIELDS_UNBOXING
- LayoutDescriptorHelper helper(src->map());
- bool has_only_tagged_fields = helper.all_fields_tagged();
-#endif
- for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
- Object* value = Memory::Object_at(src_slot);
-
- Memory::Object_at(dst_slot) = value;
-
-#if V8_DOUBLE_FIELDS_UNBOXING
- if (!may_contain_raw_values &&
- (has_only_tagged_fields ||
- helper.IsTagged(static_cast<int>(src_slot - src_addr))))
-#else
- if (!may_contain_raw_values)
-#endif
- {
- RecordMigratedSlot(value, dst_slot);
- }
-
- src_slot += kPointerSize;
- dst_slot += kPointerSize;
- }
-
- if (compacting_ && dst->IsJSFunction()) {
- Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
- Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
- } else if (dst->IsConstantPoolArray()) {
- // We special case ConstantPoolArrays since they could contain integers
- // value entries which look like tagged pointers.
- // TODO(mstarzinger): restructure this code to avoid this special-casing.
- ConstantPoolArray* array = ConstantPoolArray::cast(dst);
- ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
- while (!code_iter.is_finished()) {
- Address code_entry_slot =
- dst_addr + array->OffsetOfElementAt(code_iter.next_index());
- Address code_entry = Memory::Address_at(code_entry_slot);
-
- if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
- SlotsBuffer::IGNORE_OVERFLOW);
- }
- }
- ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
- while (!heap_iter.is_finished()) {
- Address heap_slot =
- dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
- Object* value = Memory::Object_at(heap_slot);
- RecordMigratedSlot(value, heap_slot);
- }
- }
+ heap()->MoveBlock(dst->address(), src->address(), size);
+ RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
+ dst->IterateBody(&visitor);
} else if (dest == CODE_SPACE) {
+ DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
+ DCHECK(evacuation_slots_buffer != nullptr);
PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
heap()->MoveBlock(dst_addr, src_addr, size);
- SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
- SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
- SlotsBuffer::IGNORE_OVERFLOW);
+ RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Code::cast(dst)->Relocate(dst_addr - src_addr);
} else {
- DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+ DCHECK_OBJECT_SIZE(size);
+ DCHECK(evacuation_slots_buffer == nullptr);
+ DCHECK(dest == NEW_SPACE);
heap()->MoveBlock(dst_addr, src_addr, size);
}
heap()->OnMoveEvent(dst, src, size);
@@ -2819,19 +2718,73 @@
}
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+ SlotsBuffer::SlotType slot_type, Address addr) {
+ switch (slot_type) {
+ case SlotsBuffer::CODE_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::CELL_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::CODE_ENTRY_SLOT: {
+ v->VisitCodeEntry(addr);
+ break;
+ }
+ case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+ HeapObject* obj = HeapObject::FromAddress(addr);
+ Code::BodyDescriptor::IterateBody(obj, v);
+ break;
+ }
+ case SlotsBuffer::DEBUG_TARGET_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
+ NULL);
+ if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+ RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+ rinfo.Visit(isolate, v);
+ break;
+ }
+ case SlotsBuffer::OBJECT_SLOT: {
+ v->VisitPointer(reinterpret_cast<Object**>(addr));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+}
+
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
public:
explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
- void VisitPointer(Object** p) { UpdatePointer(p); }
+ void VisitPointer(Object** p) override { UpdatePointer(p); }
- void VisitPointers(Object** start, Object** end) {
+ void VisitPointers(Object** start, Object** end) override {
for (Object** p = start; p < end; p++) UpdatePointer(p);
}
- void VisitEmbeddedPointer(RelocInfo* rinfo) {
+ void VisitCell(RelocInfo* rinfo) override {
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Object* cell = rinfo->target_cell();
+ Object* old_cell = cell;
+ VisitPointer(&cell);
+ if (cell != old_cell) {
+ rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
+ }
+ }
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
Object* target = rinfo->target_object();
Object* old_target = target;
@@ -2843,7 +2796,7 @@
}
}
- void VisitCodeTarget(RelocInfo* rinfo) {
+ void VisitCodeTarget(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
Object* old_target = target;
@@ -2853,7 +2806,7 @@
}
}
- void VisitCodeAgeSequence(RelocInfo* rinfo) {
+ void VisitCodeAgeSequence(RelocInfo* rinfo) override {
DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
Object* stub = rinfo->code_age_stub();
DCHECK(stub != NULL);
@@ -2863,14 +2816,13 @@
}
}
- void VisitDebugTarget(RelocInfo* rinfo) {
- DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
- rinfo->IsPatchedReturnSequence()) ||
- (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+ void VisitDebugTarget(RelocInfo* rinfo) override {
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Object* target =
+ Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
VisitPointer(&target);
- rinfo->set_call_address(Code::cast(target)->instruction_start());
+ rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
}
static inline void UpdateSlot(Heap* heap, Object** slot) {
@@ -2884,7 +2836,9 @@
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
DCHECK(heap->InFromSpace(heap_obj) ||
- MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+ MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
+ Page::FromAddress(heap_obj->address())
+ ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
HeapObject* target = map_word.ToForwardingAddress();
base::NoBarrier_CompareAndSwap(
reinterpret_cast<base::AtomicWord*>(slot),
@@ -2902,20 +2856,42 @@
};
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
- Address new_addr = Memory::Address_at(object->address());
+void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
+ PointersUpdatingVisitor v(heap_);
+ size_t buffer_size = buffer->Size();
- // The new space sweep will overwrite the map word of dead objects
- // with NULL. In this case we do not need to transfer this entry to
- // the store buffer which we are rebuilding.
- // We perform the pointer update with a no barrier compare-and-swap. The
- // compare and swap may fail in the case where the pointer update tries to
- // update garbage memory which was concurrently accessed by the sweeper.
- if (new_addr != NULL) {
- base::NoBarrier_CompareAndSwap(
- reinterpret_cast<base::AtomicWord*>(address),
- reinterpret_cast<base::AtomicWord>(object),
- reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
+ for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
+ SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
+ if (!SlotsBuffer::IsTypedSlot(slot)) {
+ PointersUpdatingVisitor::UpdateSlot(heap_, slot);
+ } else {
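+      // Typed slots occupy two consecutive buffer entries: a small integer
+      // encoding the slot type, followed by the raw address it refers to.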
+ ++slot_idx;
+ DCHECK(slot_idx < buffer_size);
+ UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
+ reinterpret_cast<Address>(buffer->Get(slot_idx)));
+ }
+ }
+}
+
+
+void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
+ while (buffer != NULL) {
+ UpdateSlots(buffer);
+ buffer = buffer->next();
+ }
+}
+
+
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+ MapWord map_word = object->map_word();
+ // The store buffer can still contain stale pointers in dead large objects.
+ // Ignore these pointers here.
+ DCHECK(map_word.IsForwardingAddress() ||
+ object->GetHeap()->lo_space()->FindPage(
+ reinterpret_cast<Address>(address)) != NULL);
+ if (map_word.IsForwardingAddress()) {
+ // Update the corresponding slot.
+ *address = map_word.ToForwardingAddress();
}
}
@@ -2932,27 +2908,158 @@
}
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
- int object_size) {
- DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
-
- OldSpace* target_space = heap()->TargetSpace(object);
-
- DCHECK(target_space == heap()->old_pointer_space() ||
- target_space == heap()->old_data_space());
- HeapObject* target;
- AllocationResult allocation = target_space->AllocateRaw(object_size);
- if (allocation.To(&target)) {
- MigrateObject(target, object, object_size, target_space->identity());
- heap()->IncrementPromotedObjectsSize(object_size);
- return true;
+bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
+ HeapObject** out_object) {
+ Space* owner = p->owner();
+ if (owner == heap_->lo_space() || owner == NULL) {
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ if (IsMarked(large_heap_object)) {
+ *out_object = large_heap_object;
+ return true;
+ }
+ return false;
}
+ uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
+ unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
+ MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
+ MarkBit::CellType* cells = p->markbits()->cells();
+ Address base_address = p->area_start();
+ unsigned int base_address_cell_index = Bitmap::IndexToCell(
+ Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
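+  // Worked example (hypothetical values, assuming 8-byte pointers and 32 mark
+  // bits per cell): a slot located 0x500 bytes past the page start maps to
+  // mark_bit_index 0xA0, i.e. cell_index 5 and index_mask 1 << 0.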
+
+ // Check if the slot points to the start of an object. This can happen e.g.
+ // when we left trim a fixed array. Such slots are invalid and we can remove
+ // them.
+ if (index_mask > 1) {
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index] & (index_mask >> 1)) == 0) {
+ return false;
+ }
+ } else {
+ // Left trimming moves the mark bits so we cannot be in the very first cell.
+ DCHECK(cell_index != base_address_cell_index);
+ if ((cells[cell_index] & index_mask) != 0 &&
+ (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
+ return false;
+ }
+ }
+
+ // Check if the object is in the current cell.
+ MarkBit::CellType slot_mask;
+ if ((cells[cell_index] == 0) ||
+ (base::bits::CountTrailingZeros32(cells[cell_index]) >
+ base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
+ // If we are already in the first cell, there is no live object.
+ if (cell_index == base_address_cell_index) return false;
+
+    // If not, find a preceding cell that has a mark bit set.
+ do {
+ cell_index--;
+ } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
+
+ // The slot must be in a dead object if there are no preceding cells that
+ // have mark bits set.
+ if (cells[cell_index] == 0) {
+ return false;
+ }
+
+ // The object is in a preceding cell. Set the mask to find any object.
+ slot_mask = ~0u;
+ } else {
+ // We are interested in object mark bits right before the slot.
+ slot_mask = index_mask + (index_mask - 1);
+ }
+
+ MarkBit::CellType current_cell = cells[cell_index];
+ CHECK(current_cell != 0);
+
+ // Find the last live object in the cell.
+ unsigned int leading_zeros =
+ base::bits::CountLeadingZeros32(current_cell & slot_mask);
+ CHECK(leading_zeros != Bitmap::kBitsPerCell);
+ int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
+
+ base_address += (cell_index - base_address_cell_index) *
+ Bitmap::kBitsPerCell * kPointerSize;
+ Address address = base_address + offset * kPointerSize;
+ HeapObject* object = HeapObject::FromAddress(address);
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ CHECK(object->address() < reinterpret_cast<Address>(slot));
+ if ((object->address() + kPointerSize) <= slot &&
+ (object->address() + object->Size()) > slot) {
+ // If the slot is within the last found object in the cell, the slot is
+ // in a live object.
+ // Slots pointing to the first word of an object are invalid and removed.
+ // This can happen when we move the object header while left trimming.
+ *out_object = object;
+ return true;
+ }
return false;
}
-void MarkCompactCollector::EvacuateNewSpace() {
+bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
+  // Large object pages cannot be handled by the live object iterator used
+  // below and are therefore special-cased first.
+ Space* owner = p->owner();
+ if (owner == heap_->lo_space() || owner == NULL) {
+ Object* large_object = heap_->lo_space()->FindObject(slot);
+ // This object has to exist, otherwise we would not have recorded a slot
+ // for it.
+ CHECK(large_object->IsHeapObject());
+ HeapObject* large_heap_object = HeapObject::cast(large_object);
+ if (IsMarked(large_heap_object)) {
+ return true;
+ }
+ return false;
+ }
+
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ int size = object->Size();
+
+ if (object->address() > slot) return false;
+ if (object->address() <= slot && slot < (object->address() + size)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
+ HeapObject* object = NULL;
+ // The target object is black but we don't know if the source slot is black.
+ // The source object could have died and the slot could be part of a free
+ // space. Find out based on mark bits if the slot is part of a live object.
+ if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
+ return false;
+ }
+
+ DCHECK(object != NULL);
+ int offset = static_cast<int>(slot - object->address());
+ return object->IsValidSlot(offset);
+}
+
+
+void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
+ HeapObject* object) {
+ // The target object has to be black.
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+ // The target object is black but we don't know if the source slot is black.
+ // The source object could have died and the slot could be part of a free
+ // space. Use the mark bit iterator to find out about liveness of the slot.
+ CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
+}
+
+
+void MarkCompactCollector::EvacuateNewSpacePrologue() {
  // There are soft limits in the allocation code, designed to trigger a mark
// sweep collection by failing allocations. But since we are already in
// a mark-sweep allocation, there is no sense in trying to trigger one.
@@ -2969,114 +3076,247 @@
new_space->Flip();
new_space->ResetAllocationInfo();
- int survivors_size = 0;
+ newspace_evacuation_candidates_.Clear();
+ NewSpacePageIterator it(from_bottom, from_top);
+ while (it.has_next()) {
+ newspace_evacuation_candidates_.Add(it.next());
+ }
+}
+
+HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
+ HashMap* local_pretenuring_feedback = new HashMap(
+ HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
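+  // Pretenuring feedback is gathered into this function-local map and merged
+  // into the heap's global feedback by the caller once evacuation is done.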
+ EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
+ local_pretenuring_feedback);
// First pass: traverse all objects in inactive semispace, remove marks,
// migrate live objects and write forwarding addresses. This stage puts
// new entries in the store buffer and may cause some pages to be marked
// scan-on-scavenge.
- NewSpacePageIterator it(from_bottom, from_top);
- while (it.has_next()) {
- NewSpacePage* p = it.next();
- survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+ for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
+ NewSpacePage* p =
+ reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
+ bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
+ USE(ok);
+ DCHECK(ok);
}
-
- heap_->IncrementYoungSurvivorsCounter(survivors_size);
- new_space->set_age_mark(new_space->top());
+ heap_->IncrementPromotedObjectsSize(
+ static_cast<int>(new_space_visitor.promoted_size()));
+ heap_->IncrementSemiSpaceCopiedObjectSize(
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ heap_->IncrementYoungSurvivorsCounter(
+ static_cast<int>(new_space_visitor.promoted_size()) +
+ static_cast<int>(new_space_visitor.semispace_copied_size()));
+ return local_pretenuring_feedback;
}
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
- AlwaysAllocateScope always_allocate(isolate());
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
- p->SetWasSwept();
+void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
+ SlotsBuffer* evacuation_slots_buffer) {
+ base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
+ evacuation_slots_buffers_.Add(evacuation_slots_buffer);
+}
- int offsets[16];
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
+int MarkCompactCollector::NumberOfParallelCompactionTasks() {
+ if (!FLAG_parallel_compaction) return 1;
+ // Compute the number of needed tasks based on a target compaction time, the
+ // profiled compaction speed and marked live memory.
+ //
+ // The number of parallel compaction tasks is limited by:
+ // - #evacuation pages
+ // - (#cores - 1)
+ // - a hard limit
+ const double kTargetCompactionTimeInMs = 1;
+ const int kMaxCompactionTasks = 8;
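+  // Worked example (numbers are purely illustrative): with 8 MB of live bytes
+  // on the candidates and a profiled speed of 2 MB/ms, the formula below
+  // yields 1 + (8 / 2 / 1) = 5 tasks, which is then capped by the number of
+  // candidate pages, by (#cores - 1) and by kMaxCompactionTasks.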
- if (*cell == 0) continue;
+ intptr_t compaction_speed =
+ heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
+ if (compaction_speed == 0) return 1;
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- for (int i = 0; i < live_objects; i++) {
- Address object_addr = cell_base + offsets[i] * kPointerSize;
- HeapObject* object = HeapObject::FromAddress(object_addr);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ intptr_t live_bytes = 0;
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
+ }
- int size = object->Size();
+ const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
+ const int tasks =
+ 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
+ kTargetCompactionTimeInMs);
+ const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
+ const int tasks_capped_cores = Min(cores, tasks_capped_pages);
+ const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
+ return tasks_capped_hard;
+}
- HeapObject* target_object;
- AllocationResult allocation = space->AllocateRaw(size);
- if (!allocation.To(&target_object)) {
- // If allocation failed, use emergency memory and re-try allocation.
- CHECK(space->HasEmergencyMemory());
- space->UseEmergencyMemory();
- allocation = space->AllocateRaw(size);
- }
- if (!allocation.To(&target_object)) {
- // OS refused to give us memory.
- V8::FatalProcessOutOfMemory("Evacuation");
- return;
- }
- MigrateObject(target_object, object, size, space->identity());
- DCHECK(object->map_word().IsForwardingAddress());
+void MarkCompactCollector::EvacuatePagesInParallel() {
+ const int num_pages = evacuation_candidates_.length();
+ if (num_pages == 0) return;
+
+ // Used for trace summary.
+ intptr_t live_bytes = 0;
+ intptr_t compaction_speed = 0;
+ if (FLAG_trace_fragmentation) {
+ for (Page* page : evacuation_candidates_) {
+ live_bytes += page->LiveBytes();
}
-
- // Clear marking bits for current cell.
- *cell = 0;
+ compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
}
- p->ResetLiveBytes();
+ const int num_tasks = NumberOfParallelCompactionTasks();
+
+ // Set up compaction spaces.
+ CompactionSpaceCollection** compaction_spaces_for_tasks =
+ new CompactionSpaceCollection*[num_tasks];
+ for (int i = 0; i < num_tasks; i++) {
+ compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+ }
+
+ heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
+ heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
+ num_tasks);
+
+ uint32_t* task_ids = new uint32_t[num_tasks - 1];
+ // Kick off parallel tasks.
+ StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
+ // Wait for unfinished and not-yet-started tasks.
+ WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
+ delete[] task_ids;
+
+ double compaction_duration = 0.0;
+ intptr_t compacted_memory = 0;
+ // Merge back memory (compacted and unused) from compaction spaces.
+ for (int i = 0; i < num_tasks; i++) {
+ heap()->old_space()->MergeCompactionSpace(
+ compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+ heap()->code_space()->MergeCompactionSpace(
+ compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+ compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
+ compaction_duration += compaction_spaces_for_tasks[i]->duration();
+ delete compaction_spaces_for_tasks[i];
+ }
+ delete[] compaction_spaces_for_tasks;
+ heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
+
+ // Finalize sequentially.
+ int abandoned_pages = 0;
+ for (int i = 0; i < num_pages; i++) {
+ Page* p = evacuation_candidates_[i];
+ switch (p->parallel_compaction_state().Value()) {
+ case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+ // We have partially compacted the page, i.e., some objects may have
+ // moved, others are still in place.
+ // We need to:
+ // - Leave the evacuation candidate flag for later processing of
+ // slots buffer entries.
+ // - Leave the slots buffer there for processing of entries added by
+ // the write barrier.
+ // - Rescan the page as slot recording in the migration buffer only
+ // happens upon moving (which we potentially didn't do).
+ // - Leave the page in the list of pages of a space since we could not
+ // fully evacuate it.
+ // - Mark them for rescanning for store buffer entries as we otherwise
+ // might have stale store buffer entries that become "valid" again
+ // after reusing the memory. Note that all existing store buffer
+ // entries of such pages are filtered before rescanning.
+ DCHECK(p->IsEvacuationCandidate());
+ p->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ p->set_scan_on_scavenge(true);
+ abandoned_pages++;
+ break;
+ case MemoryChunk::kCompactingFinalize:
+ DCHECK(p->IsEvacuationCandidate());
+ p->SetWasSwept();
+ p->Unlink();
+ break;
+ case MemoryChunk::kCompactingDone:
+ DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+ DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ break;
+ default:
+        // We should never observe kCompactingInProgress here; all other
+        // states are handled by the cases above.
+ UNREACHABLE();
+ }
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+ }
+ if (FLAG_trace_fragmentation) {
+ PrintIsolate(isolate(),
+ "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
+ "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
+ "d compaction_speed=%" V8_PTR_PREFIX "d\n",
+ isolate()->time_millis_since_init(), FLAG_parallel_compaction,
+ num_pages, abandoned_pages, num_tasks,
+ base::SysInfo::NumberOfProcessors(), live_bytes,
+ compaction_speed);
+ }
}
-void MarkCompactCollector::EvacuatePages() {
- int npages = evacuation_candidates_.length();
- for (int i = 0; i < npages; i++) {
+void MarkCompactCollector::StartParallelCompaction(
+ CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
+ int len) {
+ compaction_in_progress_ = true;
+ for (int i = 1; i < len; i++) {
+ CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
+ task_ids[i - 1] = task->id();
+ V8::GetCurrentPlatform()->CallOnBackgroundThread(
+ task, v8::Platform::kShortRunningTask);
+ }
+
+ // Contribute in main thread.
+ EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
+}
+
+
+void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
+ int len) {
+ // Try to cancel compaction tasks that have not been run (as they might be
+  // stuck in a worker queue). Tasks that cannot be canceled have either
+ // already completed or are still running, hence we need to wait for their
+ // semaphore signal.
+ for (int i = 0; i < len; i++) {
+ if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
+ pending_compaction_tasks_semaphore_.Wait();
+ }
+ }
+ compaction_in_progress_ = false;
+}
+
+
+void MarkCompactCollector::EvacuatePages(
+ CompactionSpaceCollection* compaction_spaces,
+ SlotsBuffer** evacuation_slots_buffer) {
+ EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
+ evacuation_slots_buffer);
+ for (int i = 0; i < evacuation_candidates_.length(); i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
- DCHECK(static_cast<int>(p->parallel_sweeping()) ==
- MemoryChunk::SWEEPING_DONE);
- PagedSpace* space = static_cast<PagedSpace*>(p->owner());
- // Allocate emergency memory for the case when compaction fails due to out
- // of memory.
- if (!space->HasEmergencyMemory()) {
- space->CreateEmergencyMemory();
- }
- if (p->IsEvacuationCandidate()) {
- // During compaction we might have to request a new page. Check that we
- // have an emergency page and the space still has room for that.
- if (space->HasEmergencyMemory() && space->CanExpand()) {
- EvacuateLiveObjectsFromPage(p);
- // Unlink the page from the list of pages here. We must not iterate
- // over that page later (e.g. when scan on scavenge pages are
- // processed). The page itself will be freed later and is still
- // reachable from the evacuation candidates list.
- p->Unlink();
- } else {
- // Without room for expansion evacuation is not guaranteed to succeed.
- // Pessimistically abandon unevacuated pages.
- for (int j = i; j < npages; j++) {
- Page* page = evacuation_candidates_[j];
- slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
- page->ClearEvacuationCandidate();
- page->SetFlag(Page::RESCAN_ON_EVACUATION);
+ DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
+ MemoryChunk::kSweepingDone);
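+    // Claim the page via an atomic state transition so that the main thread
+    // and the background compaction tasks never evacuate the same page twice.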
+ if (p->parallel_compaction_state().TrySetValue(
+ MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
+ if (p->IsEvacuationCandidate()) {
+ DCHECK_EQ(p->parallel_compaction_state().Value(),
+ MemoryChunk::kCompactingInProgress);
+ double start = heap()->MonotonicallyIncreasingTimeInMs();
+ intptr_t live_bytes = p->LiveBytes();
+ AlwaysAllocateScope always_allocate(isolate());
+ if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
+ p->ResetLiveBytes();
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingFinalize);
+ compaction_spaces->ReportCompactionProgress(
+ heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
+ } else {
+ p->parallel_compaction_state().SetValue(
+ MemoryChunk::kCompactingAborted);
}
- break;
- }
- }
- }
- if (npages > 0) {
- // Release emergency memory.
- PagedSpaces spaces(heap());
- for (PagedSpace* space = spaces.next(); space != NULL;
- space = spaces.next()) {
- if (space->HasEmergencyMemory()) {
- space->FreeEmergencyMemory();
+ } else {
+ // There could be popular pages in the list of evacuation candidates
+        // which we do not compact.
+ p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
}
}
}
@@ -3098,45 +3338,6 @@
};
-static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
- SlotsBuffer::SlotType slot_type, Address addr) {
- switch (slot_type) {
- case SlotsBuffer::CODE_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case SlotsBuffer::RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::cast(obj)->CodeIterateBody(v);
- break;
- }
- case SlotsBuffer::DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::JS_RETURN_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
- if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
- break;
- }
- case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
-
-
enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
@@ -3153,7 +3354,6 @@
DCHECK(free_list == NULL);
return space->Free(start, size);
} else {
- // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
return size - free_list->Free(start, size);
}
}
@@ -3178,58 +3378,52 @@
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
- int offsets[16];
+ // If we use the skip list for code space pages, we have to lock the skip
+ // list because it could be accessed concurrently by the runtime or the
+ // deoptimizer.
SkipList* skip_list = p->skip_list();
- int curr_region = -1;
if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
skip_list->Clear();
}
intptr_t freed_bytes = 0;
intptr_t max_freed_bytes = 0;
+ int curr_region = -1;
- for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
- Address cell_base = it.CurrentCellBase();
- MarkBit::CellType* cell = it.CurrentCell();
- int live_objects = MarkWordToObjectStarts(*cell, offsets);
- int live_index = 0;
- for (; live_objects != 0; live_objects--) {
- Address free_end = cell_base + offsets[live_index++] * kPointerSize;
- if (free_end != free_start) {
- int size = static_cast<int>(free_end - free_start);
- if (free_space_mode == ZAP_FREE_SPACE) {
- memset(free_start, 0xcc, size);
- }
- freed_bytes = Free<parallelism>(space, free_list, free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
- GDBJITInterface::RemoveCodeRange(free_start, free_end);
- }
-#endif
+ LiveObjectIterator<kBlackObjects> it(p);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Address free_end = object->address();
+ if (free_end != free_start) {
+ int size = static_cast<int>(free_end - free_start);
+ if (free_space_mode == ZAP_FREE_SPACE) {
+ memset(free_start, 0xcc, size);
}
- HeapObject* live_object = HeapObject::FromAddress(free_end);
- DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
- Map* map = live_object->synchronized_map();
- int size = live_object->SizeFromMap(map);
- if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
- live_object->IterateBody(map->instance_type(), size, v);
- }
- if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
- int new_region_start = SkipList::RegionNumber(free_end);
- int new_region_end =
- SkipList::RegionNumber(free_end + size - kPointerSize);
- if (new_region_start != curr_region || new_region_end != curr_region) {
- skip_list->AddObject(free_end, size);
- curr_region = new_region_end;
- }
- }
- free_start = free_end + size;
+ freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
}
- // Clear marking bits for current cell.
- *cell = 0;
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+ object->IterateBody(map->instance_type(), size, v);
+ }
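+    // Register the object in the code page's skip list whenever it spans a
+    // new region, so that later inner-pointer lookups (e.g. from pc values)
+    // can quickly find the object's start.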
+ if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+ int new_region_start = SkipList::RegionNumber(free_end);
+ int new_region_end =
+ SkipList::RegionNumber(free_end + size - kPointerSize);
+ if (new_region_start != curr_region || new_region_end != curr_region) {
+ skip_list->AddObject(free_end, size);
+ curr_region = new_region_end;
+ }
+ }
+ free_start = free_end + size;
}
+
+ // Clear the mark bits of that page and reset live bytes count.
+ Bitmap::Clear(p);
+
if (free_start != p->area_end()) {
int size = static_cast<int>(p->area_end() - free_start);
if (free_space_mode == ZAP_FREE_SPACE) {
@@ -3237,18 +3431,12 @@
}
freed_bytes = Free<parallelism>(space, free_list, free_start, size);
max_freed_bytes = Max(freed_bytes, max_freed_bytes);
-#ifdef ENABLE_GDB_JIT_INTERFACE
- if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
- GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
- }
-#endif
}
- p->ResetLiveBytes();
if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
// When concurrent sweeping is active, the page will be marked after
// sweeping by the main thread.
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
} else {
p->SetWasSwept();
}
@@ -3256,70 +3444,6 @@
}
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
- Page* p = Page::FromAddress(code->address());
-
- if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- return false;
- }
-
- Address code_start = code->address();
- Address code_end = code_start + code->Size();
-
- uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
- uint32_t end_index =
- MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
- Bitmap* b = p->markbits();
-
- MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
- MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
- MarkBit::CellType* start_cell = start_mark_bit.cell();
- MarkBit::CellType* end_cell = end_mark_bit.cell();
-
- if (value) {
- MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
- MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-
- if (start_cell == end_cell) {
- *start_cell |= start_mask & end_mask;
- } else {
- *start_cell |= start_mask;
- for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
- *cell = ~0;
- }
- *end_cell |= end_mask;
- }
- } else {
- for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
- *cell = 0;
- }
- }
-
- return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
- // We did not record any slots in large objects thus
- // we can safely go to the page from the slot address.
- Page* p = Page::FromAddress(addr);
-
- // First check owner's identity because old pointer and old data spaces
- // are swept lazily and might still have non-zero mark-bits on some
- // pages.
- if (p->owner()->identity() != CODE_SPACE) return false;
-
- // In code space only bits on evacuation candidates (but we don't record
- // any slots on them) and under invalidated code objects are non-zero.
- MarkBit mark_bit =
- p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
- return mark_bit.Get();
-}
-
-
void MarkCompactCollector::InvalidateCode(Code* code) {
if (heap_->incremental_marking()->IsCompacting() &&
!ShouldSkipEvacuationSlotRecording(code)) {
@@ -3329,7 +3453,11 @@
MarkBit mark_bit = Marking::MarkBitFrom(code);
if (Marking::IsWhite(mark_bit)) return;
- invalidated_code_.Add(code);
+ // Ignore all slots that might have been recorded in the body of the
+ // deoptimized code object. Assumption: no slots will be recorded for
+ // this object after invalidating it.
+ RemoveObjectSlots(code->instruction_start(),
+ code->address() + code->Size());
}
}
@@ -3340,145 +3468,252 @@
}
-bool MarkCompactCollector::MarkInvalidatedCode() {
- bool code_marked = false;
-
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
-
- if (SetMarkBitsUnderInvalidatedCode(code, true)) {
- code_marked = true;
+void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
+ Address end_slot) {
+ // Remove entries by replacing them with an old-space slot containing a smi
+ // that is located in an unmovable page.
+ int npages = evacuation_candidates_.length();
+ for (int i = 0; i < npages; i++) {
+ Page* p = evacuation_candidates_[i];
+ DCHECK(p->IsEvacuationCandidate() ||
+ p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+ if (p->IsEvacuationCandidate()) {
+ SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
+ end_slot);
}
}
-
- return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
- }
}
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
- int length = invalidated_code_.length();
- for (int i = 0; i < length; i++) {
- Code* code = invalidated_code_[i];
- if (code != NULL) {
- code->Iterate(visitor);
- SetMarkBitsUnderInvalidatedCode(code, false);
+#ifdef VERIFY_HEAP
+static void VerifyAllBlackObjects(MemoryChunk* page) {
+ LiveObjectIterator<kAllLiveObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ }
+}
+#endif // VERIFY_HEAP
+
+
+bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
+ HeapObjectVisitor* visitor,
+ IterationMode mode) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
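+    // If the visitor bails out on an object (e.g. evacuation could not
+    // allocate space for it), restore a consistent state: clear the mark bits
+    // of everything visited so far and recompute the page's live byte count.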
+ if (!visitor->Visit(object)) {
+ if (mode == kClearMarkbits) {
+ page->markbits()->ClearRange(
+ page->AddressToMarkbitIndex(page->area_start()),
+ page->AddressToMarkbitIndex(object->address()));
+ RecomputeLiveBytes(page);
+ }
+ return false;
}
}
- invalidated_code_.Rewind(0);
+ if (mode == kClearMarkbits) {
+ Bitmap::Clear(page);
+ }
+ return true;
+}
+
+
+void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
+ LiveObjectIterator<kBlackObjects> it(page);
+ int new_live_size = 0;
+ HeapObject* object = nullptr;
+ while ((object = it.Next()) != nullptr) {
+ new_live_size += object->Size();
+ }
+ page->SetLiveBytes(new_live_size);
+}
+
+
+void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
+ ObjectVisitor* visitor) {
+#ifdef VERIFY_HEAP
+ VerifyAllBlackObjects(page);
+#endif // VERIFY_HEAP
+
+ LiveObjectIterator<kBlackObjects> it(page);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+ Map* map = object->synchronized_map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ }
+}
+
+
+void MarkCompactCollector::SweepAbortedPages() {
+ // Second pass on aborted pages.
+ for (int i = 0; i < evacuation_candidates_.length(); i++) {
+ Page* p = evacuation_candidates_[i];
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+ PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+ switch (space->identity()) {
+ case OLD_SPACE:
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ break;
+ case CODE_SPACE:
+ if (FLAG_zap_code_space) {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ ZAP_FREE_SPACE>(space, NULL, p, nullptr);
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
+ }
+ }
}
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+ GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Heap::RelocationLock relocation_lock(heap());
- bool code_slots_filtering_required;
+ HashMap* local_pretenuring_feedback = nullptr;
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_NEWSPACE);
- code_slots_filtering_required = MarkInvalidatedCode();
- EvacuateNewSpace();
+ GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
+ EvacuationScope evacuation_scope(this);
+ EvacuateNewSpacePrologue();
+ local_pretenuring_feedback = EvacuateNewSpaceInParallel();
+ heap_->new_space()->set_age_mark(heap_->new_space()->top());
}
{
GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_EVACUATE_PAGES);
+ GCTracer::Scope::MC_EVACUATE_CANDIDATES);
EvacuationScope evacuation_scope(this);
- EvacuatePages();
+ EvacuatePagesInParallel();
+ }
+
+ {
+ heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
+ delete local_pretenuring_feedback;
+ }
+
+ UpdatePointersAfterEvacuation();
+
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+ // After updating all pointers, we can finally sweep the aborted pages,
+ // effectively overriding any forward pointers.
+ SweepAbortedPages();
+
+ // EvacuateNewSpaceAndCandidates iterates over new space objects and for
+ // ArrayBuffers either re-registers them as live or promotes them. This is
+ // needed to properly free them.
+ heap()->array_buffer_tracker()->FreeDead(false);
+
+ // Deallocate evacuated candidate pages.
+ ReleaseEvacuationCandidates();
+ }
+
+#ifdef VERIFY_HEAP
+ if (FLAG_verify_heap && !sweeping_in_progress_) {
+ VerifyEvacuation(heap());
+ }
+#endif
+}
+
+
+void MarkCompactCollector::UpdatePointersAfterEvacuation() {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
+ {
+ GCTracer::Scope gc_scope(
+ heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
+ UpdateSlotsRecordedIn(migration_slots_buffer_);
+ if (FLAG_trace_fragmentation_verbose) {
+ PrintF(" migration slots buffer: %d\n",
+ SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+ }
+ slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
+ DCHECK(migration_slots_buffer_ == NULL);
+
+ // TODO(hpayer): Process the slots buffers in parallel. This has to be done
+ // after evacuation of all pages finishes.
+ int buffers = evacuation_slots_buffers_.length();
+ for (int i = 0; i < buffers; i++) {
+ SlotsBuffer* buffer = evacuation_slots_buffers_[i];
+ UpdateSlotsRecordedIn(buffer);
+ slots_buffer_allocator_->DeallocateChain(&buffer);
+ }
+ evacuation_slots_buffers_.Rewind(0);
}
// Second pass: find pointers to new space and update them.
PointersUpdatingVisitor updating_visitor(heap());
{
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+ GCTracer::Scope gc_scope(
+ heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
// Update pointers in to space.
- SemiSpaceIterator to_it(heap()->new_space()->bottom(),
- heap()->new_space()->top());
+ SemiSpaceIterator to_it(heap()->new_space());
for (HeapObject* object = to_it.Next(); object != NULL;
object = to_it.Next()) {
Map* map = object->map();
object->IterateBody(map->instance_type(), object->SizeFromMap(map),
&updating_visitor);
}
- }
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
// Update roots.
heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
- }
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
&Heap::ScavengeStoreBufferCallback);
- heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
- &UpdatePointer);
- }
-
- {
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
- PrintF(" migration slots buffer: %d\n",
- SlotsBuffer::SizeOfChain(migration_slots_buffer_));
- }
-
- if (compacting_ && was_marked_incrementally_) {
- // It's difficult to filter out slots recorded for large objects.
- LargeObjectIterator it(heap_->lo_space());
- for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
- // LargeObjectSpace is not swept yet thus we have to skip
- // dead objects explicitly.
- if (!IsMarked(obj)) continue;
-
- Page* p = Page::FromAddress(obj->address());
- if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
- obj->Iterate(&updating_visitor);
- p->ClearFlag(Page::RESCAN_ON_EVACUATION);
- }
- }
- }
+ heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
}
int npages = evacuation_candidates_.length();
{
GCTracer::Scope gc_scope(
heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
for (int i = 0; i < npages; i++) {
Page* p = evacuation_candidates_[i];
DCHECK(p->IsEvacuationCandidate() ||
p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
if (p->IsEvacuationCandidate()) {
- SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
- code_slots_filtering_required);
- if (FLAG_trace_fragmentation) {
+ UpdateSlotsRecordedIn(p->slots_buffer());
+ if (FLAG_trace_fragmentation_verbose) {
PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
SlotsBuffer::SizeOfChain(p->slots_buffer()));
}
+ slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
// code objects from non-updated pc pointing into evacuation candidate.
SkipList* list = p->skip_list();
if (list != NULL) list->Clear();
- } else {
+
+ // First pass on aborted pages, fixing up all live objects.
+ if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
+ p->ClearEvacuationCandidate();
+ VisitLiveObjectsBody(p, &updating_visitor);
+ }
+ }
+
+ if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
if (FLAG_gc_verbose) {
PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
reinterpret_cast<intptr_t>(p));
@@ -3487,12 +3722,7 @@
p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
switch (space->identity()) {
- case OLD_DATA_SPACE:
- Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
- IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
- &updating_visitor);
- break;
- case OLD_POINTER_SPACE:
+ case OLD_SPACE:
Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
&updating_visitor);
@@ -3516,51 +3746,18 @@
}
}
- GCTracer::Scope gc_scope(heap()->tracer(),
- GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+ {
+ GCTracer::Scope gc_scope(heap()->tracer(),
+ GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
+ heap_->string_table()->Iterate(&updating_visitor);
- // Update pointers from cells.
- HeapObjectIterator cell_iterator(heap_->cell_space());
- for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
- cell = cell_iterator.Next()) {
- if (cell->IsCell()) {
- Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
- }
+ // Update pointers from external string table.
+ heap_->UpdateReferencesInExternalStringTable(
+ &UpdateReferenceInExternalStringTableEntry);
+
+ EvacuationWeakObjectRetainer evacuation_object_retainer;
+ heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
}
-
- HeapObjectIterator js_global_property_cell_iterator(
- heap_->property_cell_space());
- for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
- cell = js_global_property_cell_iterator.Next()) {
- if (cell->IsPropertyCell()) {
- PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
- }
- }
-
- heap_->string_table()->Iterate(&updating_visitor);
- updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
- if (heap_->weak_object_to_code_table()->IsHashTable()) {
- WeakHashTable* table =
- WeakHashTable::cast(heap_->weak_object_to_code_table());
- table->Iterate(&updating_visitor);
- table->Rehash(heap_->isolate()->factory()->undefined_value());
- }
-
- // Update pointers from external string table.
- heap_->UpdateReferencesInExternalStringTable(
- &UpdateReferenceInExternalStringTableEntry);
-
- EvacuationWeakObjectRetainer evacuation_object_retainer;
- heap()->ProcessWeakReferences(&evacuation_object_retainer);
-
- // Visit invalidated code (we ignored all slots on it) and clear mark-bits
- // under it.
- ProcessInvalidatedCode(&updating_visitor);
-
- heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-
- slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
- DCHECK(migration_slots_buffer_ == NULL);
}
@@ -3584,410 +3781,17 @@
PagedSpace* space = static_cast<PagedSpace*>(p->owner());
space->Free(p->area_start(), p->area_size());
p->set_scan_on_scavenge(false);
- slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
p->ResetLiveBytes();
+ CHECK(p->WasSwept());
space->ReleasePage(p);
}
evacuation_candidates_.Rewind(0);
compacting_ = false;
+ heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
heap()->FreeQueuedChunks();
}
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte. The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words large we don't have entries for two
-// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
- 0, _, _,
- _, _, // 0
- 1, 0, _,
- _, _, // 1
- 1, 1, _,
- _, _, // 2
- X, _, _,
- _, _, // 3
- 1, 2, _,
- _, _, // 4
- 2, 0, 2,
- _, _, // 5
- X, _, _,
- _, _, // 6
- X, _, _,
- _, _, // 7
- 1, 3, _,
- _, _, // 8
- 2, 0, 3,
- _, _, // 9
- 2, 1, 3,
- _, _, // 10
- X, _, _,
- _, _, // 11
- X, _, _,
- _, _, // 12
- X, _, _,
- _, _, // 13
- X, _, _,
- _, _, // 14
- X, _, _,
- _, _, // 15
- 1, 4, _,
- _, _, // 16
- 2, 0, 4,
- _, _, // 17
- 2, 1, 4,
- _, _, // 18
- X, _, _,
- _, _, // 19
- 2, 2, 4,
- _, _, // 20
- 3, 0, 2,
- 4, _, // 21
- X, _, _,
- _, _, // 22
- X, _, _,
- _, _, // 23
- X, _, _,
- _, _, // 24
- X, _, _,
- _, _, // 25
- X, _, _,
- _, _, // 26
- X, _, _,
- _, _, // 27
- X, _, _,
- _, _, // 28
- X, _, _,
- _, _, // 29
- X, _, _,
- _, _, // 30
- X, _, _,
- _, _, // 31
- 1, 5, _,
- _, _, // 32
- 2, 0, 5,
- _, _, // 33
- 2, 1, 5,
- _, _, // 34
- X, _, _,
- _, _, // 35
- 2, 2, 5,
- _, _, // 36
- 3, 0, 2,
- 5, _, // 37
- X, _, _,
- _, _, // 38
- X, _, _,
- _, _, // 39
- 2, 3, 5,
- _, _, // 40
- 3, 0, 3,
- 5, _, // 41
- 3, 1, 3,
- 5, _, // 42
- X, _, _,
- _, _, // 43
- X, _, _,
- _, _, // 44
- X, _, _,
- _, _, // 45
- X, _, _,
- _, _, // 46
- X, _, _,
- _, _, // 47
- X, _, _,
- _, _, // 48
- X, _, _,
- _, _, // 49
- X, _, _,
- _, _, // 50
- X, _, _,
- _, _, // 51
- X, _, _,
- _, _, // 52
- X, _, _,
- _, _, // 53
- X, _, _,
- _, _, // 54
- X, _, _,
- _, _, // 55
- X, _, _,
- _, _, // 56
- X, _, _,
- _, _, // 57
- X, _, _,
- _, _, // 58
- X, _, _,
- _, _, // 59
- X, _, _,
- _, _, // 60
- X, _, _,
- _, _, // 61
- X, _, _,
- _, _, // 62
- X, _, _,
- _, _, // 63
- 1, 6, _,
- _, _, // 64
- 2, 0, 6,
- _, _, // 65
- 2, 1, 6,
- _, _, // 66
- X, _, _,
- _, _, // 67
- 2, 2, 6,
- _, _, // 68
- 3, 0, 2,
- 6, _, // 69
- X, _, _,
- _, _, // 70
- X, _, _,
- _, _, // 71
- 2, 3, 6,
- _, _, // 72
- 3, 0, 3,
- 6, _, // 73
- 3, 1, 3,
- 6, _, // 74
- X, _, _,
- _, _, // 75
- X, _, _,
- _, _, // 76
- X, _, _,
- _, _, // 77
- X, _, _,
- _, _, // 78
- X, _, _,
- _, _, // 79
- 2, 4, 6,
- _, _, // 80
- 3, 0, 4,
- 6, _, // 81
- 3, 1, 4,
- 6, _, // 82
- X, _, _,
- _, _, // 83
- 3, 2, 4,
- 6, _, // 84
- 4, 0, 2,
- 4, 6, // 85
- X, _, _,
- _, _, // 86
- X, _, _,
- _, _, // 87
- X, _, _,
- _, _, // 88
- X, _, _,
- _, _, // 89
- X, _, _,
- _, _, // 90
- X, _, _,
- _, _, // 91
- X, _, _,
- _, _, // 92
- X, _, _,
- _, _, // 93
- X, _, _,
- _, _, // 94
- X, _, _,
- _, _, // 95
- X, _, _,
- _, _, // 96
- X, _, _,
- _, _, // 97
- X, _, _,
- _, _, // 98
- X, _, _,
- _, _, // 99
- X, _, _,
- _, _, // 100
- X, _, _,
- _, _, // 101
- X, _, _,
- _, _, // 102
- X, _, _,
- _, _, // 103
- X, _, _,
- _, _, // 104
- X, _, _,
- _, _, // 105
- X, _, _,
- _, _, // 106
- X, _, _,
- _, _, // 107
- X, _, _,
- _, _, // 108
- X, _, _,
- _, _, // 109
- X, _, _,
- _, _, // 110
- X, _, _,
- _, _, // 111
- X, _, _,
- _, _, // 112
- X, _, _,
- _, _, // 113
- X, _, _,
- _, _, // 114
- X, _, _,
- _, _, // 115
- X, _, _,
- _, _, // 116
- X, _, _,
- _, _, // 117
- X, _, _,
- _, _, // 118
- X, _, _,
- _, _, // 119
- X, _, _,
- _, _, // 120
- X, _, _,
- _, _, // 121
- X, _, _,
- _, _, // 122
- X, _, _,
- _, _, // 123
- X, _, _,
- _, _, // 124
- X, _, _,
- _, _, // 125
- X, _, _,
- _, _, // 126
- X, _, _,
- _, _, // 127
- 1, 7, _,
- _, _, // 128
- 2, 0, 7,
- _, _, // 129
- 2, 1, 7,
- _, _, // 130
- X, _, _,
- _, _, // 131
- 2, 2, 7,
- _, _, // 132
- 3, 0, 2,
- 7, _, // 133
- X, _, _,
- _, _, // 134
- X, _, _,
- _, _, // 135
- 2, 3, 7,
- _, _, // 136
- 3, 0, 3,
- 7, _, // 137
- 3, 1, 3,
- 7, _, // 138
- X, _, _,
- _, _, // 139
- X, _, _,
- _, _, // 140
- X, _, _,
- _, _, // 141
- X, _, _,
- _, _, // 142
- X, _, _,
- _, _, // 143
- 2, 4, 7,
- _, _, // 144
- 3, 0, 4,
- 7, _, // 145
- 3, 1, 4,
- 7, _, // 146
- X, _, _,
- _, _, // 147
- 3, 2, 4,
- 7, _, // 148
- 4, 0, 2,
- 4, 7, // 149
- X, _, _,
- _, _, // 150
- X, _, _,
- _, _, // 151
- X, _, _,
- _, _, // 152
- X, _, _,
- _, _, // 153
- X, _, _,
- _, _, // 154
- X, _, _,
- _, _, // 155
- X, _, _,
- _, _, // 156
- X, _, _,
- _, _, // 157
- X, _, _,
- _, _, // 158
- X, _, _,
- _, _, // 159
- 2, 5, 7,
- _, _, // 160
- 3, 0, 5,
- 7, _, // 161
- 3, 1, 5,
- 7, _, // 162
- X, _, _,
- _, _, // 163
- 3, 2, 5,
- 7, _, // 164
- 4, 0, 2,
- 5, 7, // 165
- X, _, _,
- _, _, // 166
- X, _, _,
- _, _, // 167
- 3, 3, 5,
- 7, _, // 168
- 4, 0, 3,
- 5, 7, // 169
- 4, 1, 3,
- 5, 7 // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits. Returns the number of objects that start in the
-// range. Puts the offsets of the words in the supplied array.
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
- int objects = 0;
- int offset = 0;
-
- // No consecutive 1 bits.
- DCHECK((mark_bits & 0x180) != 0x180);
- DCHECK((mark_bits & 0x18000) != 0x18000);
- DCHECK((mark_bits & 0x1800000) != 0x1800000);
-
- while (mark_bits != 0) {
- int byte = (mark_bits & 0xff);
- mark_bits >>= 8;
- if (byte != 0) {
- DCHECK(byte < kStartTableLines); // No consecutive 1 bits.
- char* table = kStartTable + byte * kStartTableEntriesPerLine;
- int objects_in_these_8_words = table[0];
- DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
- DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
- for (int i = 0; i < objects_in_these_8_words; i++) {
- starts[objects++] = offset + table[1 + i];
- }
- }
- offset += 8;
- }
- return objects;
-}
-
-
int MarkCompactCollector::SweepInParallel(PagedSpace* space,
int required_freed_bytes) {
int max_freed = 0;
@@ -4009,20 +3813,40 @@
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
int max_freed = 0;
- if (page->TryParallelSweeping()) {
- FreeList* free_list = space == heap()->old_pointer_space()
- ? free_list_old_pointer_space_.get()
- : free_list_old_data_space_.get();
+ if (page->TryLock()) {
+ // If this page was already swept in the meantime, we can return here.
+ if (page->parallel_sweeping_state().Value() !=
+ MemoryChunk::kSweepingPending) {
+ page->mutex()->Unlock();
+ return 0;
+ }
+ page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+ FreeList* free_list;
FreeList private_free_list(space);
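+    // Sweep into a thread-local free list first; it is concatenated into the
+    // shared per-space free list once this page has been processed.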
- max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ if (space->identity() == OLD_SPACE) {
+ free_list = free_list_old_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ } else if (space->identity() == CODE_SPACE) {
+ free_list = free_list_code_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ } else {
+ free_list = free_list_map_space_.get();
+ max_freed =
+ Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+ }
free_list->Concatenate(&private_free_list);
+ page->mutex()->Unlock();
}
return max_freed;
}
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
space->ClearStats();
// We defensively initialize end_of_unswept_pages_ here with the first page
@@ -4037,7 +3861,7 @@
while (it.has_next()) {
Page* p = it.next();
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
// Clear sweeping flags indicating that marking bits are still intact.
p->ClearWasSwept();
@@ -4049,71 +3873,61 @@
continue;
}
+ if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+ // We need to sweep the page to get it into an iterable state again. Note
+      // that this adds unusable memory into the free list, which the free
+      // list later drops again. Since we only use the flag for testing, this
+      // is fine.
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
+ continue;
+ }
+
// One unused page is kept, all further are released before sweeping them.
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
- reinterpret_cast<intptr_t>(p));
+ PrintIsolate(isolate(), "sweeping: released page: %p", p);
}
- // Adjust unswept free bytes because releasing a page expects said
- // counter to be accurate for unswept pages.
- space->IncreaseUnsweptFreeBytes(p);
space->ReleasePage(p);
continue;
}
unused_page_present = true;
}
- switch (sweeper) {
- case CONCURRENT_SWEEPING:
- if (!parallel_sweeping_active) {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
- reinterpret_cast<intptr_t>(p));
- }
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- pages_swept++;
- parallel_sweeping_active = true;
- } else {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
- reinterpret_cast<intptr_t>(p));
- }
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
- space->IncreaseUnsweptFreeBytes(p);
- }
- space->set_end_of_unswept_pages(p);
- break;
- case SEQUENTIAL_SWEEPING: {
- if (FLAG_gc_verbose) {
- PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
- }
- if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+ if (!parallel_sweeping_active) {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: %p", p);
+ }
+ if (space->identity() == CODE_SPACE) {
+ if (FLAG_zap_code_space) {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
ZAP_FREE_SPACE>(space, NULL, p, NULL);
- } else if (space->identity() == CODE_SPACE) {
+ } else {
Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
IGNORE_FREE_SPACE>(space, NULL, p, NULL);
- } else {
- Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
- pages_swept++;
- break;
+ } else {
+ Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+ IGNORE_FREE_SPACE>(space, NULL, p, NULL);
}
- default: { UNREACHABLE(); }
+ pages_swept++;
+ parallel_sweeping_active = true;
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
+ }
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
+ int to_sweep = p->area_size() - p->LiveBytes();
+ space->accounting_stats_.ShrinkSpace(to_sweep);
}
+ space->set_end_of_unswept_pages(p);
}
if (FLAG_gc_verbose) {
- PrintF("SweepSpace: %s (%d pages swept)\n",
- AllocationSpaceName(space->identity()), pages_swept);
+ PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
+ AllocationSpaceName(space->identity()), pages_swept);
}
-
- // Give pages that are queued to be freed back to the OS.
- heap()->FreeQueuedChunks();
}
@@ -4121,69 +3935,46 @@
GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
double start_time = 0.0;
if (FLAG_print_cumulative_gc_stat) {
- start_time = base::OS::TimeCurrentMillis();
+ start_time = heap_->MonotonicallyIncreasingTimeInMs();
}
#ifdef DEBUG
state_ = SWEEP_SPACES;
#endif
+
MoveEvacuationCandidatesToEndOfPagesList();
- // Noncompacting collections simply sweep the spaces to clear the mark
- // bits and free the nonlive blocks (for old and map spaces). We sweep
- // the map space last because freeing non-live maps overwrites them and
- // the other spaces rely on possibly non-live maps to get the sizes for
- // non-live objects.
{
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_OLDSPACE);
- {
- SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
- SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
- }
sweeping_in_progress_ = true;
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_OLD);
+ StartSweepSpace(heap()->old_space());
+ }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_CODE);
+ StartSweepSpace(heap()->code_space());
+ }
+ {
+ GCTracer::Scope sweep_scope(heap()->tracer(),
+ GCTracer::Scope::MC_SWEEP_MAP);
+ StartSweepSpace(heap()->map_space());
+ }
if (FLAG_concurrent_sweeping) {
StartSweeperThreads();
}
}
- RemoveDeadInvalidatedCode();
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_CODE);
- SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
- }
-
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_CELL);
- SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
- SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
- }
-
- EvacuateNewSpaceAndCandidates();
-
- // ClearNonLiveTransitions depends on precise sweeping of map space to
- // detect whether unmarked map became dead in this collection or in one
- // of the previous ones.
- {
- GCTracer::Scope sweep_scope(heap()->tracer(),
- GCTracer::Scope::MC_SWEEP_MAP);
- SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
- }
-
- // Deallocate unmarked objects and clear marked bits for marked objects.
+ // Deallocate unmarked large objects.
heap_->lo_space()->FreeUnmarkedObjects();
- // Deallocate evacuated candidate pages.
- ReleaseEvacuationCandidates();
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- code_range->ReserveEmergencyBlock();
- }
+ // Give pages that are queued to be freed back to the OS. Invalid store
+  // buffer entries are already filtered out. We can just release the memory.
+ heap()->FreeQueuedChunks();
if (FLAG_print_cumulative_gc_stat) {
- heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+ heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
start_time);
}
}
@@ -4193,40 +3984,20 @@
PageIterator it(space);
while (it.has_next()) {
Page* p = it.next();
- if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
- p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+ if (p->parallel_sweeping_state().Value() ==
+ MemoryChunk::kSweepingFinalize) {
+ p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
p->SetWasSwept();
}
- DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+ DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
}
}
void MarkCompactCollector::ParallelSweepSpacesComplete() {
- ParallelSweepSpaceComplete(heap()->old_pointer_space());
- ParallelSweepSpaceComplete(heap()->old_data_space());
-}
-
-
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
- if (isolate()->debug()->is_loaded() ||
- isolate()->debug()->has_break_points()) {
- enable = false;
- }
-
- if (enable) {
- if (code_flusher_ != NULL) return;
- code_flusher_ = new CodeFlusher(isolate());
- } else {
- if (code_flusher_ == NULL) return;
- code_flusher_->EvictAllCandidates();
- delete code_flusher_;
- code_flusher_ = NULL;
- }
-
- if (FLAG_trace_code_flushing) {
- PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
- }
+ ParallelSweepSpaceComplete(heap()->old_space());
+ ParallelSweepSpaceComplete(heap()->code_space());
+ ParallelSweepSpaceComplete(heap()->map_space());
}
@@ -4250,85 +4021,39 @@
}
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
- return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
- SlotsBuffer** buffer_address, SlotType type,
- Address addr, AdditionMode mode) {
- SlotsBuffer* buffer = *buffer_address;
- if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
- if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
- allocator->DeallocateChain(buffer_address);
- return false;
- }
- buffer = allocator->AllocateBuffer(buffer);
- *buffer_address = buffer;
+void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
+ if (FLAG_trace_fragmentation) {
+ PrintF("Page %p is too popular. Disabling evacuation.\n",
+ reinterpret_cast<void*>(page));
}
- DCHECK(buffer->HasSpaceForTypedSlot());
- buffer->Add(reinterpret_cast<ObjectSlot>(type));
- buffer->Add(reinterpret_cast<ObjectSlot>(addr));
- return true;
+
+ isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
+
+ // TODO(gc) If all evacuation candidates are too popular we
+ // should stop slots recording entirely.
+ page->ClearEvacuationCandidate();
+
+ DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
+ page->SetFlag(Page::POPULAR_PAGE);
+
+ // We were not collecting slots on this page that point
+  // to other evacuation candidates, thus we have to
+ // rescan the page after evacuation to discover and update all
+ // pointers to evacuated objects.
+ page->SetFlag(Page::RESCAN_ON_EVACUATION);
}
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
- if (RelocInfo::IsCodeTarget(rmode)) {
- return SlotsBuffer::CODE_TARGET_SLOT;
- } else if (RelocInfo::IsEmbeddedObject(rmode)) {
- return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
- } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
- return SlotsBuffer::DEBUG_TARGET_SLOT;
- } else if (RelocInfo::IsJSReturn(rmode)) {
- return SlotsBuffer::JS_RETURN_SLOT;
- }
- UNREACHABLE();
- return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
- Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
- RelocInfo::Mode rmode = rinfo->rmode();
- if (target_page->IsEvacuationCandidate() &&
- (rinfo->host() == NULL ||
- !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
- bool success;
- if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
- // This doesn't need to be typed since it is just a normal heap pointer.
- Object** target_pointer =
- reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
- success = SlotsBuffer::AddTo(
- &slots_buffer_allocator_, target_page->slots_buffer_address(),
- target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
- } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
- success = SlotsBuffer::AddTo(
- &slots_buffer_allocator_, target_page->slots_buffer_address(),
- SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
- SlotsBuffer::FAIL_ON_OVERFLOW);
- } else {
- success = SlotsBuffer::AddTo(
- &slots_buffer_allocator_, target_page->slots_buffer_address(),
- SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
- }
- if (!success) {
- EvictEvacuationCandidate(target_page);
- }
- }
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
+ Code* target) {
Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
if (target_page->IsEvacuationCandidate() &&
- !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
- if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+ !ShouldSkipEvacuationSlotRecording(object)) {
+ if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
target_page->slots_buffer_address(),
SlotsBuffer::CODE_ENTRY_SLOT, slot,
SlotsBuffer::FAIL_ON_OVERFLOW)) {
- EvictEvacuationCandidate(target_page);
+ EvictPopularEvacuationCandidate(target_page);
}
}
}
@@ -4342,76 +4067,11 @@
pc);
MarkBit mark_bit = Marking::MarkBitFrom(host);
if (Marking::IsBlack(mark_bit)) {
- RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+ RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
RecordRelocSlot(&rinfo, target);
}
}
}
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
- SlotsBuffer::ObjectSlot slot) {
- return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
- PointersUpdatingVisitor v(heap);
-
- for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
- ObjectSlot slot = slots_[slot_idx];
- if (!IsTypedSlot(slot)) {
- if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
- PointersUpdatingVisitor::UpdateSlot(heap, slot);
- }
- } else {
- ++slot_idx;
- DCHECK(slot_idx < idx_);
- Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
- if (!IsOnInvalidatedCodeObject(pc)) {
- UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
- reinterpret_cast<Address>(slots_[slot_idx]));
- }
- }
- }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
- return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
- delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
- SlotsBuffer* buffer = *buffer_address;
- while (buffer != NULL) {
- SlotsBuffer* next_buffer = buffer->next();
- DeallocateBuffer(buffer);
- buffer = next_buffer;
- }
- *buffer_address = NULL;
-}
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8