Merge V8 5.3.332.45. DO NOT MERGE
Test: Manual
FPIIM-449
Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index b2ae93d..f9a55df 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -25,7 +25,6 @@
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
-#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"
@@ -132,13 +131,14 @@
static void VerifyMarking(NewSpace* space) {
Address end = space->top();
- NewSpacePageIterator it(space->bottom(), end);
// The bottom position is at the start of its page. Allows us to use
// page->area_start() as start of range on all pages.
CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
- while (it.has_next()) {
- Page* page = it.next();
- Address limit = it.has_next() ? page->area_end() : end;
+
+ NewSpacePageRange range(space->bottom(), end);
+ for (auto it = range.begin(); it != range.end();) {
+ Page* page = *(it++);
+ Address limit = it != range.end() ? page->area_end() : end;
CHECK(limit == end || !page->Contains(end));
VerifyMarking(space->heap(), page->area_start(), limit);
}
@@ -146,10 +146,7 @@
static void VerifyMarking(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (p->IsFlagSet(Page::BLACK_PAGE)) {
VerifyMarkingBlackPage(space->heap(), p);
} else {
@@ -205,13 +202,12 @@
static void VerifyEvacuation(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
VerifyEvacuationVisitor visitor;
-
- while (it.has_next()) {
- Page* page = it.next();
+ NewSpacePageRange range(space->bottom(), space->top());
+ for (auto it = range.begin(); it != range.end();) {
+ Page* page = *(it++);
Address current = page->area_start();
- Address limit = it.has_next() ? page->area_end() : space->top();
+ Address limit = it != range.end() ? page->area_end() : space->top();
CHECK(limit == space->top() || !page->Contains(space->top()));
while (current < limit) {
HeapObject* object = HeapObject::FromAddress(current);
@@ -226,10 +222,7 @@
if (FLAG_use_allocation_folding && (space == heap->old_space())) {
return;
}
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (p->IsEvacuationCandidate()) continue;
VerifyEvacuation(p);
}
@@ -361,10 +354,7 @@
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -372,10 +362,7 @@
void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
- NewSpacePageIterator it(space->bottom(), space->top());
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
CHECK(p->markbits()->IsClean());
CHECK_EQ(0, p->LiveBytes());
}
@@ -420,10 +407,7 @@
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
- PageIterator it(space);
-
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
Bitmap::Clear(p);
if (p->IsFlagSet(Page::BLACK_PAGE)) {
p->ClearFlag(Page::BLACK_PAGE);
@@ -433,10 +417,8 @@
static void ClearMarkbitsInNewSpace(NewSpace* space) {
- NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
- while (it.has_next()) {
- Bitmap::Clear(it.next());
+ for (Page* page : *space) {
+ Bitmap::Clear(page);
}
}
@@ -472,13 +454,13 @@
private:
// v8::Task overrides.
void Run() override {
- DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
+ DCHECK_GE(space_to_start_, FIRST_SPACE);
DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
- const int offset = space_to_start_ - FIRST_PAGED_SPACE;
- const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+ const int offset = space_to_start_ - FIRST_SPACE;
+ const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
for (int i = 0; i < num_spaces; i++) {
- const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
- DCHECK_GE(space_id, FIRST_PAGED_SPACE);
+ const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
+ DCHECK_GE(space_id, FIRST_SPACE);
DCHECK_LE(space_id, LAST_PAGED_SPACE);
sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
}
@@ -508,7 +490,7 @@
void MarkCompactCollector::Sweeper::StartSweepingHelper(
AllocationSpace space_to_start) {
- num_sweeping_tasks_++;
+ num_sweeping_tasks_.Increment(1);
V8::GetCurrentPlatform()->CallOnBackgroundThread(
new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
v8::Platform::kShortRunningTask);
@@ -516,9 +498,8 @@
void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
Page* page) {
- PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
if (!page->SweepingDone()) {
- ParallelSweepPage(page, owner);
+ ParallelSweepPage(page, page->owner()->identity());
if (!page->SweepingDone()) {
// We were not able to sweep that page, i.e., a concurrent
// sweeper thread currently owns this page. Wait for the sweeper
@@ -555,18 +536,31 @@
}
if (FLAG_concurrent_sweeping) {
- while (num_sweeping_tasks_ > 0) {
+ while (num_sweeping_tasks_.Value() > 0) {
pending_sweeper_tasks_semaphore_.Wait();
- num_sweeping_tasks_--;
+ num_sweeping_tasks_.Increment(-1);
}
}
- ForAllSweepingSpaces(
- [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
+ ForAllSweepingSpaces([this](AllocationSpace space) {
+ if (space == NEW_SPACE) {
+ swept_list_[NEW_SPACE].Clear();
+ }
+ DCHECK(sweeping_list_[space].empty());
+ });
late_pages_ = false;
sweeping_in_progress_ = false;
}
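+// Ensures that all pages in new space have been swept. If concurrent sweeping
+// is disabled or has not finished yet, the remaining pages are swept (or
+// waited on) here on the main thread.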
+void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
+ if (!sweeping_in_progress_) return;
+ if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
+ for (Page* p : *heap_->new_space()) {
+ SweepOrWaitUntilSweepingCompleted(p);
+ }
+ }
+}
+
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper().sweeping_in_progress()) return;
@@ -583,12 +577,11 @@
}
bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
- if (!pending_sweeper_tasks_semaphore_.WaitFor(
- base::TimeDelta::FromSeconds(0))) {
- return false;
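+ // Drain the semaphore for every task that has finished so far, then report
+ // completion only once no sweeping tasks remain.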
+ while (pending_sweeper_tasks_semaphore_.WaitFor(
+ base::TimeDelta::FromSeconds(0))) {
+ num_sweeping_tasks_.Increment(-1);
}
- pending_sweeper_tasks_semaphore_.Signal();
- return true;
+ return num_sweeping_tasks_.Value() == 0;
}
void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
@@ -703,9 +696,7 @@
std::vector<LiveBytesPagePair> pages;
pages.reserve(number_of_pages);
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (p->NeverEvacuate()) continue;
if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
// Invariant: Evacuation candidates are just created when marking is
@@ -858,9 +849,22 @@
AbortWeakCells();
AbortTransitionArrays();
AbortCompaction();
+ if (heap_->UsingEmbedderHeapTracer()) {
+ heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
+ }
was_marked_incrementally_ = false;
}
+ if (!was_marked_incrementally_) {
+ if (heap_->UsingEmbedderHeapTracer()) {
+ heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
+ }
+ }
+
+ if (UsingEmbedderHeapTracer()) {
+ embedder_heap_tracer()->EnterFinalPause();
+ }
+
// Don't start compaction if we are in the middle of incremental
// marking cycle. We did not collect any slots.
if (!FLAG_never_compact && !was_marked_incrementally_) {
@@ -872,6 +876,7 @@
space = spaces.next()) {
space->PrepareForMarkCompact();
}
+ heap()->account_external_memory_concurrently_freed();
#ifdef VERIFY_HEAP
if (!was_marked_incrementally_ && FLAG_verify_heap) {
@@ -1074,7 +1079,7 @@
void CodeFlusher::EvictCandidate(JSFunction* function) {
- DCHECK(!function->next_function_link()->IsUndefined());
+ DCHECK(!function->next_function_link()->IsUndefined(isolate_));
Object* undefined = isolate_->heap()->undefined_value();
// Make sure previous flushing decisions are revisited.
@@ -1299,7 +1304,7 @@
table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
if (FLAG_track_gc_object_stats) {
- ObjectStatsVisitor::Initialize(&table_);
+ MarkCompactObjectStatsVisitor::Initialize(&table_);
}
}
@@ -1408,6 +1413,8 @@
HeapObject* object = HeapObject::cast(*p);
+ if (collector_->heap()->PurgeLeftTrimmedObject(p)) return;
+
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (Marking::IsBlackOrGrey(mark_bit)) return;
@@ -1535,6 +1542,9 @@
class RecordMigratedSlotVisitor final : public ObjectVisitor {
public:
+ explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
+ : collector_(collector) {}
+
inline void VisitPointer(Object** p) final {
RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
}
@@ -1550,10 +1560,59 @@
Address code_entry = Memory::Address_at(code_entry_slot);
if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
- CODE_ENTRY_SLOT, code_entry_slot);
+ nullptr, CODE_ENTRY_SLOT,
+ code_entry_slot);
}
}
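+ // Reloc-info slots that may point at moved objects are handed to the
+ // collector so it can record them.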
+ inline void VisitCodeTarget(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+ Code* host = rinfo->host();
+ collector_->RecordRelocSlot(host, rinfo, target);
+ }
+
+ inline void VisitDebugTarget(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+ rinfo->IsPatchedDebugBreakSlotSequence());
+ Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
+ Code* host = rinfo->host();
+ collector_->RecordRelocSlot(host, rinfo, target);
+ }
+
+ inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
+ DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+ HeapObject* object = HeapObject::cast(rinfo->target_object());
+ Code* host = rinfo->host();
+ collector_->RecordRelocSlot(host, rinfo, object);
+ }
+
+ inline void VisitCell(RelocInfo* rinfo) final {
+ DCHECK(rinfo->rmode() == RelocInfo::CELL);
+ Cell* cell = rinfo->target_cell();
+ Code* host = rinfo->host();
+ collector_->RecordRelocSlot(host, rinfo, cell);
+ }
+
+ // Entries that will never move.
+ inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
+ DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+ Code* stub = rinfo->code_age_stub();
+ USE(stub);
+ DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
+ }
+
+ // Entries that are skipped for recording.
+ inline void VisitExternalReference(RelocInfo* rinfo) final {}
+ inline void VisitExternalReference(Address* p) final {}
+ inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
+ inline void VisitExternalOneByteString(
+ v8::String::ExternalOneByteStringResource** resource) final {}
+ inline void VisitExternalTwoByteString(
+ v8::String::ExternalStringResource** resource) final {}
+ inline void VisitInternalReference(RelocInfo* rinfo) final {}
+ inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
+
private:
inline void RecordMigratedSlot(Object* value, Address slot) {
if (value->IsHeapObject()) {
@@ -1565,6 +1624,8 @@
}
}
}
+
+ MarkCompactCollector* collector_;
};
class MarkCompactCollector::HeapObjectVisitor {
@@ -1582,12 +1643,15 @@
: heap_(heap),
compaction_spaces_(compaction_spaces),
profiling_(
- heap->isolate()->cpu_profiler()->is_profiling() ||
+ heap->isolate()->is_profiling() ||
heap->isolate()->logger()->is_logging_code_events() ||
heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
HeapObject** target_object) {
+#ifdef VERIFY_HEAP
+ if (AbortCompactionForTesting(object)) return false;
+#endif // VERIFY_HEAP
int size = object->Size();
AllocationAlignment alignment = object->RequiredAlignment();
AllocationResult allocation = target_space->AllocateRaw(size, alignment);
@@ -1622,7 +1686,7 @@
PROFILE(heap_->isolate(),
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
- RecordMigratedSlotVisitor visitor;
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
} else if (dest == CODE_SPACE) {
DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
@@ -1631,9 +1695,9 @@
CodeMoveEvent(AbstractCode::cast(src), dst_addr));
}
heap_->CopyBlock(dst_addr, src_addr, size);
- RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
- RELOCATED_CODE_OBJECT, dst_addr);
Code::cast(dst)->Relocate(dst_addr - src_addr);
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
} else {
DCHECK_OBJECT_SIZE(size);
DCHECK(dest == NEW_SPACE);
@@ -1645,6 +1709,26 @@
Memory::Address_at(src_addr) = dst_addr;
}
+#ifdef VERIFY_HEAP
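+ // With --stress-compaction, aborts evacuation for a page the first time an
+ // object at a seed-derived page offset is visited on it; the second hit on
+ // the same page lets evacuation proceed again.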
+ bool AbortCompactionForTesting(HeapObject* object) {
+ if (FLAG_stress_compaction) {
+ const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
+ Page::kPageAlignmentMask & ~kPointerAlignmentMask;
+ if ((reinterpret_cast<uintptr_t>(object->address()) &
+ Page::kPageAlignmentMask) == mask) {
+ Page* page = Page::FromAddress(object->address());
+ if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
+ page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+ } else {
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+#endif // VERIFY_HEAP
+
Heap* heap_;
CompactionSpaceCollection* compaction_spaces_;
bool profiling_;
@@ -1658,7 +1742,7 @@
explicit EvacuateNewSpaceVisitor(Heap* heap,
CompactionSpaceCollection* compaction_spaces,
- HashMap* local_pretenuring_feedback)
+ base::HashMap* local_pretenuring_feedback)
: EvacuateVisitorBase(heap, compaction_spaces),
buffer_(LocalAllocationBuffer::InvalidBuffer()),
space_to_allocate_(NEW_SPACE),
@@ -1671,23 +1755,15 @@
local_pretenuring_feedback_);
int size = object->Size();
HeapObject* target_object = nullptr;
- if (heap_->ShouldBePromoted(object->address(), size) &&
+ if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
&target_object)) {
- // If we end up needing more special cases, we should factor this out.
- if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
- heap_->array_buffer_tracker()->Promote(
- JSArrayBuffer::cast(target_object));
- }
promoted_size_ += size;
return true;
}
HeapObject* target = nullptr;
AllocationSpace space = AllocateTargetObject(object, &target);
MigrateObject(HeapObject::cast(target), object, size, space);
- if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
- heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
- }
semispace_copied_size_ += size;
return true;
}
@@ -1706,6 +1782,7 @@
const int size = old_object->Size();
AllocationAlignment alignment = old_object->RequiredAlignment();
AllocationResult allocation;
+ AllocationSpace space_allocated_in = space_to_allocate_;
if (space_to_allocate_ == NEW_SPACE) {
if (size > kMaxLabObjectSize) {
allocation =
@@ -1716,11 +1793,12 @@
}
if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
allocation = AllocateInOldSpace(size, alignment);
+ space_allocated_in = OLD_SPACE;
}
bool ok = allocation.To(target_object);
DCHECK(ok);
USE(ok);
- return space_to_allocate_;
+ return space_allocated_in;
}
inline bool NewLocalAllocationBuffer() {
@@ -1795,36 +1873,44 @@
AllocationSpace space_to_allocate_;
intptr_t promoted_size_;
intptr_t semispace_copied_size_;
- HashMap* local_pretenuring_feedback_;
+ base::HashMap* local_pretenuring_feedback_;
};
class MarkCompactCollector::EvacuateNewSpacePageVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
- EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
+ explicit EvacuateNewSpacePageVisitor(Heap* heap)
+ : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
- static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
- if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
- Page* new_page = Page::ConvertNewToOld(page, owner);
- new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
- }
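+ // Promotes an entire page to old space without copying its objects.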
+ static void MoveToOldSpace(Page* page, PagedSpace* owner) {
+ page->Unlink();
+ Page* new_page = Page::ConvertNewToOld(page, owner);
+ new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ }
+
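+ // Keeps an entire page in new space by moving it from from-space to
+ // to-space without copying its objects.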
+ static void MoveToToSpace(Page* page) {
+ page->heap()->new_space()->MovePageFromSpaceToSpace(page);
+ page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
}
inline bool Visit(HeapObject* object) {
- if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
- object->GetHeap()->array_buffer_tracker()->Promote(
- JSArrayBuffer::cast(object));
- }
- RecordMigratedSlotVisitor visitor;
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
object->IterateBodyFast(&visitor);
promoted_size_ += object->Size();
return true;
}
intptr_t promoted_size() { return promoted_size_; }
+ intptr_t semispace_copied_size() { return semispace_copied_size_; }
+
+ void account_semispace_copied(intptr_t copied) {
+ semispace_copied_size_ += copied;
+ }
private:
+ Heap* heap_;
intptr_t promoted_size_;
+ intptr_t semispace_copied_size_;
};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
@@ -1849,30 +1935,20 @@
class MarkCompactCollector::EvacuateRecordOnlyVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
- explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
+ explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
inline bool Visit(HeapObject* object) {
- if (space_ == OLD_SPACE) {
- RecordMigratedSlotVisitor visitor;
- object->IterateBody(&visitor);
- } else {
- DCHECK_EQ(space_, CODE_SPACE);
- // Add a typed slot for the whole code object.
- RememberedSet<OLD_TO_OLD>::InsertTyped(
- Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
- object->address());
- }
+ RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
+ object->IterateBody(&visitor);
return true;
}
private:
- AllocationSpace space_;
+ Heap* heap_;
};
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
- PageIterator it(space);
- while (it.has_next()) {
- Page* p = it.next();
+ for (Page* p : *space) {
if (!p->IsFlagSet(Page::BLACK_PAGE)) {
DiscoverGreyObjectsOnPage(p);
}
@@ -1883,9 +1959,7 @@
void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
NewSpace* space = heap()->new_space();
- NewSpacePageIterator it(space->bottom(), space->top());
- while (it.has_next()) {
- Page* page = it.next();
+ for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
DiscoverGreyObjectsOnPage(page);
if (marking_deque()->IsFull()) return;
}
@@ -2052,8 +2126,10 @@
bool work_to_do = true;
while (work_to_do) {
if (UsingEmbedderHeapTracer()) {
- embedder_heap_tracer()->TraceWrappersFrom(wrappers_to_trace_);
- wrappers_to_trace_.clear();
+ RegisterWrappersWithEmbedderHeapTracer();
+ embedder_heap_tracer()->AdvanceTracing(
+ 0, EmbedderHeapTracer::AdvanceTracingActions(
+ EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
}
if (!only_process_harmony_weak_collections) {
isolate()->global_handles()->IterateObjectGroups(
@@ -2170,6 +2246,15 @@
embedder_heap_tracer_ = tracer;
}
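+// Hands all collected wrapper pairs over to the embedder's heap tracer and
+// clears the local buffer.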
+void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
+ DCHECK(UsingEmbedderHeapTracer());
+ if (wrappers_to_trace_.empty()) {
+ return;
+ }
+ embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
+ wrappers_to_trace_.clear();
+}
+
void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
DCHECK(js_object->WasConstructedFromApiFunction());
if (js_object->GetInternalFieldCount() >= 2 &&
@@ -2177,7 +2262,7 @@
js_object->GetInternalField(0) != heap_->undefined_value() &&
js_object->GetInternalField(1) != heap_->undefined_value()) {
DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
- wrappers_to_trace().push_back(std::pair<void*, void*>(
+ wrappers_to_trace_.push_back(std::pair<void*, void*>(
reinterpret_cast<void*>(js_object->GetInternalField(0)),
reinterpret_cast<void*>(js_object->GetInternalField(1))));
}
@@ -2210,6 +2295,10 @@
} else {
// Abort any pending incremental activities e.g. incremental sweeping.
incremental_marking->Stop();
+ if (FLAG_track_gc_object_stats) {
+ // Clear object stats collected during incremental marking.
+ heap()->object_stats_->ClearObjectStats();
+ }
if (marking_deque_.in_use()) {
marking_deque_.Uninitialize(true);
}
@@ -2246,10 +2335,6 @@
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
- if (UsingEmbedderHeapTracer()) {
- embedder_heap_tracer()->TracePrologue();
- ProcessMarkingDeque();
- }
ProcessEphemeralMarking(&root_visitor, false);
}
@@ -2378,7 +2463,7 @@
for (uint32_t i = 0; i < capacity; i++) {
uint32_t key_index = table->EntryToIndex(i);
Object* key = table->get(key_index);
- if (!table->IsKey(key)) continue;
+ if (!table->IsKey(isolate, key)) continue;
uint32_t value_index = table->EntryToValueIndex(i);
Object* value = table->get(value_index);
DCHECK(key->IsWeakCell());
@@ -2750,128 +2835,20 @@
slot_type = OBJECT_SLOT;
}
}
- RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
+ RememberedSet<OLD_TO_OLD>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), slot_type, addr);
}
}
-static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
- SlotType slot_type, Address addr) {
- switch (slot_type) {
- case CODE_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case CELL_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case CODE_ENTRY_SLOT: {
- v->VisitCodeEntry(addr);
- break;
- }
- case RELOCATED_CODE_OBJECT: {
- HeapObject* obj = HeapObject::FromAddress(addr);
- Code::BodyDescriptor::IterateBody(obj, v);
- break;
- }
- case DEBUG_TARGET_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
- NULL);
- if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
- break;
- }
- case EMBEDDED_OBJECT_SLOT: {
- RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
- rinfo.Visit(isolate, v);
- break;
- }
- case OBJECT_SLOT: {
- v->VisitPointer(reinterpret_cast<Object**>(addr));
- break;
- }
- default:
- UNREACHABLE();
- break;
- }
-}
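+// Updates a single slot to the forwarding address if the referenced object
+// has moved. Always returns REMOVE_SLOT, as the entry is not needed
+// afterwards.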
+static inline SlotCallbackResult UpdateSlot(Object** slot) {
+ Object* obj = reinterpret_cast<Object*>(
+ base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor : public ObjectVisitor {
- public:
- explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
-
- void VisitPointer(Object** p) override { UpdatePointer(p); }
-
- void VisitPointers(Object** start, Object** end) override {
- for (Object** p = start; p < end; p++) UpdatePointer(p);
- }
-
- void VisitCell(RelocInfo* rinfo) override {
- DCHECK(rinfo->rmode() == RelocInfo::CELL);
- Object* cell = rinfo->target_cell();
- Object* old_cell = cell;
- VisitPointer(&cell);
- if (cell != old_cell) {
- rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
- }
- }
-
- void VisitEmbeddedPointer(RelocInfo* rinfo) override {
- DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
- Object* target = rinfo->target_object();
- Object* old_target = target;
- VisitPointer(&target);
- // Avoid unnecessary changes that might unnecessary flush the instruction
- // cache.
- if (target != old_target) {
- rinfo->set_target_object(target);
- }
- }
-
- void VisitCodeTarget(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
- Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
- Object* old_target = target;
- VisitPointer(&target);
- if (target != old_target) {
- rinfo->set_target_address(Code::cast(target)->instruction_start());
- }
- }
-
- void VisitCodeAgeSequence(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- DCHECK(stub != NULL);
- VisitPointer(&stub);
- if (stub != rinfo->code_age_stub()) {
- rinfo->set_code_age_stub(Code::cast(stub));
- }
- }
-
- void VisitDebugTarget(RelocInfo* rinfo) override {
- DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
- rinfo->IsPatchedDebugBreakSlotSequence());
- Object* target =
- Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
- VisitPointer(&target);
- rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
- }
-
- static inline void UpdateSlot(Heap* heap, Object** slot) {
- Object* obj = reinterpret_cast<Object*>(
- base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-
- if (!obj->IsHeapObject()) return;
-
+ if (obj->IsHeapObject()) {
HeapObject* heap_obj = HeapObject::cast(obj);
-
MapWord map_word = heap_obj->map_word();
if (map_word.IsForwardingAddress()) {
- DCHECK(heap->InFromSpace(heap_obj) ||
+ DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
Page::FromAddress(heap_obj->address())
->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
@@ -2880,15 +2857,42 @@
reinterpret_cast<base::AtomicWord*>(slot),
reinterpret_cast<base::AtomicWord>(obj),
reinterpret_cast<base::AtomicWord>(target));
- DCHECK(!heap->InFromSpace(target) &&
+ DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
!MarkCompactCollector::IsOnEvacuationCandidate(target));
}
}
+ return REMOVE_SLOT;
+}
- private:
- inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersUpdatingVisitor : public ObjectVisitor {
+ public:
+ void VisitPointer(Object** p) override { UpdateSlot(p); }
- Heap* heap_;
+ void VisitPointers(Object** start, Object** end) override {
+ for (Object** p = start; p < end; p++) UpdateSlot(p);
+ }
+
+ void VisitCell(RelocInfo* rinfo) override {
+ UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
+ }
+
+ void VisitEmbeddedPointer(RelocInfo* rinfo) override {
+ UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
+ }
+
+ void VisitCodeTarget(RelocInfo* rinfo) override {
+ UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
+ }
+
+ void VisitCodeEntry(Address entry_address) override {
+ UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
+ }
+
+ void VisitDebugTarget(RelocInfo* rinfo) override {
+ UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
+ }
};
static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
@@ -3033,21 +3037,34 @@
void MarkCompactCollector::EvacuateNewSpacePrologue() {
NewSpace* new_space = heap()->new_space();
- NewSpacePageIterator it(new_space->bottom(), new_space->top());
// Append the list of new space pages to be processed.
- while (it.has_next()) {
- newspace_evacuation_candidates_.Add(it.next());
+ for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
+ newspace_evacuation_candidates_.Add(p);
}
new_space->Flip();
new_space->ResetAllocationInfo();
}
-void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
- newspace_evacuation_candidates_.Rewind(0);
-}
-
class MarkCompactCollector::Evacuator : public Malloced {
public:
+ enum EvacuationMode {
+ kObjectsNewToOld,
+ kPageNewToOld,
+ kObjectsOldToOld,
+ kPageNewToNew,
+ };
+
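+ // Returns the evacuation mode for |chunk|: whole-page promotion to old
+ // space, whole-page move within new space, object-wise promotion out of
+ // new space, or object-wise old-space compaction.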
+ static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
+ // Note: The order of checks is important in this function.
+ if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
+ return kPageNewToOld;
+ if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
+ return kPageNewToNew;
+ if (chunk->InNewSpace()) return kObjectsNewToOld;
+ DCHECK(chunk->IsEvacuationCandidate());
+ return kObjectsOldToOld;
+ }
+
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
static int PageEvacuationThreshold() {
@@ -3059,11 +3076,11 @@
explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
compaction_spaces_(collector->heap()),
- local_pretenuring_feedback_(HashMap::PointersMatch,
+ local_pretenuring_feedback_(base::HashMap::PointersMatch,
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
- new_space_page_visitor(),
+ new_space_page_visitor(collector->heap()),
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3077,38 +3094,20 @@
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
private:
- enum EvacuationMode {
- kObjectsNewToOld,
- kPageNewToOld,
- kObjectsOldToOld,
- };
-
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
inline Heap* heap() { return collector_->heap(); }
- inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
- // Note: The order of checks is important in this function.
- if (chunk->InNewSpace()) return kObjectsNewToOld;
- if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
- return kPageNewToOld;
- DCHECK(chunk->IsEvacuationCandidate());
- return kObjectsOldToOld;
- }
-
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
- template <IterationMode mode, class Visitor>
- inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
-
MarkCompactCollector* collector_;
// Locally cached collector data.
CompactionSpaceCollection compaction_spaces_;
- HashMap local_pretenuring_feedback_;
+ base::HashMap local_pretenuring_feedback_;
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
@@ -3120,75 +3119,78 @@
intptr_t bytes_compacted_;
};
-template <MarkCompactCollector::IterationMode mode, class Visitor>
-bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
- Visitor* visitor) {
+bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
bool success = false;
- DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
- p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
- int saved_live_bytes = p->LiveBytes();
- double evacuation_time;
+ DCHECK(page->SweepingDone());
+ int saved_live_bytes = page->LiveBytes();
+ double evacuation_time = 0.0;
+ Heap* heap = page->heap();
{
- AlwaysAllocateScope always_allocate(heap()->isolate());
+ AlwaysAllocateScope always_allocate(heap->isolate());
TimedScope timed_scope(&evacuation_time);
- success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
+ switch (ComputeEvacuationMode(page)) {
+ case kObjectsNewToOld:
+ success = collector_->VisitLiveObjects(page, &new_space_visitor_,
+ kClearMarkbits);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ DCHECK(success);
+ break;
+ case kPageNewToOld:
+ success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
+ kKeepMarking);
+ // ArrayBufferTracker will be updated during sweeping.
+ DCHECK(success);
+ break;
+ case kPageNewToNew:
+ new_space_page_visitor.account_semispace_copied(page->LiveBytes());
+ // ArrayBufferTracker will be updated during sweeping.
+ success = true;
+ break;
+ case kObjectsOldToOld:
+ success = collector_->VisitLiveObjects(page, &old_space_visitor_,
+ kClearMarkbits);
+ if (!success) {
+ // Aborted compaction page. We have to record slots here, since we
+ // might not have recorded them in the first place.
+ // Note: We mark the page as aborted here to be able to record slots
+ // for code objects in |RecordMigratedSlotVisitor|.
+ page->SetFlag(Page::COMPACTION_WAS_ABORTED);
+ EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
+ success =
+ collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
+ DCHECK(success);
+ // We need to return failure here to indicate that we want this page
+ // added to the sweeper.
+ success = false;
+ } else {
+ ArrayBufferTracker::ProcessBuffers(
+ page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
}
+ ReportCompactionProgress(evacuation_time, saved_live_bytes);
if (FLAG_trace_evacuation) {
- const char age_mark_tag =
- !p->InNewSpace()
- ? 'x'
- : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
- ? '>'
- : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
- : '#';
- PrintIsolate(heap()->isolate(),
- "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
- "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
- this, p, p->InNewSpace(), age_mark_tag,
- p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
- p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
- evacuation_time);
- }
- if (success) {
- ReportCompactionProgress(evacuation_time, saved_live_bytes);
+ PrintIsolate(heap->isolate(),
+ "evacuation[%p]: page=%p new_space=%d "
+ "page_evacuation=%d executable=%d contains_age_mark=%d "
+ "live_bytes=%d time=%f\n",
+ static_cast<void*>(this), static_cast<void*>(page),
+ page->InNewSpace(),
+ page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
+ page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
+ page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
+ page->Contains(heap->new_space()->age_mark()),
+ saved_live_bytes, evacuation_time);
}
return success;
}
-bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
- bool result = false;
- DCHECK(page->SweepingDone());
- switch (ComputeEvacuationMode(page)) {
- case kObjectsNewToOld:
- result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
- DCHECK(result);
- USE(result);
- break;
- case kPageNewToOld:
- result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
- DCHECK(result);
- USE(result);
- break;
- case kObjectsOldToOld:
- result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
- if (!result) {
- // Aborted compaction page. We can record slots here to have them
- // processed in parallel later on.
- EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
- result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
- DCHECK(result);
- USE(result);
- // We need to return failure here to indicate that we want this page
- // added to the sweeper.
- return false;
- }
- break;
- default:
- UNREACHABLE();
- }
- return result;
-}
-
void MarkCompactCollector::Evacuator::Finalize() {
heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
heap()->code_space()->MergeCompactionSpace(
@@ -3197,11 +3199,13 @@
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_space_page_visitor.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
- new_space_visitor_.semispace_copied_size());
+ new_space_visitor_.semispace_copied_size() +
+ new_space_page_visitor.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size() +
- new_space_page_visitor.promoted_size());
+ new_space_page_visitor.promoted_size() +
+ new_space_page_visitor.semispace_copied_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3249,31 +3253,33 @@
static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
bool success, PerPageData data) {
- if (chunk->InNewSpace()) {
- DCHECK(success);
- } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
- DCHECK(success);
- Page* p = static_cast<Page*>(chunk);
- p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
- p->ForAllFreeListCategories(
- [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
- heap->mark_compact_collector()->sweeper().AddLatePage(
- p->owner()->identity(), p);
- } else {
- Page* p = static_cast<Page*>(chunk);
- if (success) {
- DCHECK(p->IsEvacuationCandidate());
- DCHECK(p->SweepingDone());
- p->Unlink();
- } else {
- // We have partially compacted the page, i.e., some objects may have
- // moved, others are still in place.
- p->SetFlag(Page::COMPACTION_WAS_ABORTED);
- p->ClearEvacuationCandidate();
- // Slots have already been recorded so we just need to add it to the
- // sweeper.
- *data += 1;
- }
+ using Evacuator = MarkCompactCollector::Evacuator;
+ Page* p = static_cast<Page*>(chunk);
+ switch (Evacuator::ComputeEvacuationMode(p)) {
+ case Evacuator::kPageNewToOld:
+ break;
+ case Evacuator::kPageNewToNew:
+ DCHECK(success);
+ break;
+ case Evacuator::kObjectsNewToOld:
+ DCHECK(success);
+ break;
+ case Evacuator::kObjectsOldToOld:
+ if (success) {
+ DCHECK(p->IsEvacuationCandidate());
+ DCHECK(p->SweepingDone());
+ p->Unlink();
+ } else {
+ // We have partially compacted the page, i.e., some objects may have
+ // moved, others are still in place.
+ p->ClearEvacuationCandidate();
+ // Slots have already been recorded so we just need to add it to the
+ // sweeper, which will happen after updating pointers.
+ *data += 1;
+ }
+ break;
+ default:
+ UNREACHABLE();
}
}
};
@@ -3295,10 +3301,14 @@
live_bytes += page->LiveBytes();
if (!page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
- page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
!page->Contains(age_mark)) {
- EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
+ if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
+ EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
+ } else {
+ EvacuateNewSpacePageVisitor::MoveToToSpace(page);
+ }
}
+
job.AddPage(page, &abandoned_pages);
}
DCHECK_GE(job.NumberOfPages(), 1);
@@ -3352,16 +3362,21 @@
template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
+ MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
ObjectVisitor* v) {
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
- DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
- space->identity() == CODE_SPACE);
+ DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
+ (skip_list_mode == REBUILD_SKIP_LIST));
DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
+ // Before we sweep objects on the page, we free dead array buffers, which
+ // requires valid mark bits.
+ ArrayBufferTracker::FreeDead(p);
+
Address free_start = p->area_start();
DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
@@ -3387,8 +3402,13 @@
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = space->UnaccountedFree(free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = space->UnaccountedFree(free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, size,
+ ClearRecordedSlots::kNo);
+ }
}
Map* map = object->synchronized_map();
int size = object->SizeFromMap(map);
@@ -3415,10 +3435,16 @@
if (free_space_mode == ZAP_FREE_SPACE) {
memset(free_start, 0xcc, size);
}
- freed_bytes = space->UnaccountedFree(free_start, size);
- max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ if (free_list_mode == REBUILD_FREE_LIST) {
+ freed_bytes = space->UnaccountedFree(free_start, size);
+ max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+ } else {
+ p->heap()->CreateFillerObjectAt(free_start, size,
+ ClearRecordedSlots::kNo);
+ }
}
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
+ if (free_list_mode == IGNORE_FREE_LIST) return 0;
return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}
@@ -3438,6 +3464,7 @@
Address start = code->instruction_start();
Address end = code->address() + code->Size();
RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
+ RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
}
}
@@ -3533,12 +3560,15 @@
EvacuateNewSpacePrologue();
EvacuatePagesInParallel();
- EvacuateNewSpaceEpilogue();
heap()->new_space()->set_age_mark(heap()->new_space()->top());
}
UpdatePointersAfterEvacuation();
+ if (!heap()->new_space()->Rebalance()) {
+ FatalProcessOutOfMemory("NewSpace::Rebalance");
+ }
+
// Give pages that are queued to be freed back to the OS. Note that filtering
// slots only handles old space (for unboxed doubles), and thus map space can
// still contain stale pointers. We only free the chunks after pointer updates
@@ -3548,6 +3578,19 @@
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
+ for (Page* p : newspace_evacuation_candidates_) {
+ if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
+ sweeper().AddLatePage(p->owner()->identity(), p);
+ } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+ p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
+ p->ForAllFreeListCategories(
+ [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
+ sweeper().AddLatePage(p->owner()->identity(), p);
+ }
+ }
+ newspace_evacuation_candidates_.Rewind(0);
+
for (Page* p : evacuation_candidates_) {
// Important: skip list should be cleared only after roots were updated
// because root iteration traverses the stack and might have to find
@@ -3560,11 +3603,6 @@
}
}
- // EvacuateNewSpaceAndCandidates iterates over new space objects and for
- // ArrayBuffers either re-registers them as live or promotes them. This is
- // needed to properly free them.
- heap()->array_buffer_tracker()->FreeDead(false);
-
// Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
}
@@ -3580,12 +3618,12 @@
class PointerUpdateJobTraits {
public:
typedef int PerPageData; // Per page data is not used in this job.
- typedef PointersUpdatingVisitor* PerTaskData;
+ typedef int PerTaskData; // Per task data is not used in this job.
- static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
- MemoryChunk* chunk, PerPageData) {
+ static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
+ PerPageData) {
UpdateUntypedPointers(heap, chunk);
- UpdateTypedPointers(heap, chunk, visitor);
+ UpdateTypedPointers(heap, chunk);
return true;
}
static const bool NeedSequentialFinalization = false;
@@ -3595,37 +3633,71 @@
private:
static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_NEW) {
- RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
- UpdateOldToNewSlot);
+ RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
+ return CheckAndUpdateOldToNewSlot(heap, slot);
+ });
} else {
- RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
- PointersUpdatingVisitor::UpdateSlot(heap,
- reinterpret_cast<Object**>(slot));
- return REMOVE_SLOT;
+ RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
+ return UpdateSlot(reinterpret_cast<Object**>(slot));
});
}
}
- static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
- PointersUpdatingVisitor* visitor) {
+ static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
if (direction == OLD_TO_OLD) {
Isolate* isolate = heap->isolate();
RememberedSet<OLD_TO_OLD>::IterateTyped(
- chunk, [isolate, visitor](SlotType type, Address slot) {
- UpdateTypedSlot(isolate, visitor, type, slot);
- return REMOVE_SLOT;
+ chunk, [isolate](SlotType type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
+ UpdateSlot);
+ });
+ } else {
+ Isolate* isolate = heap->isolate();
+ RememberedSet<OLD_TO_NEW>::IterateTyped(
+ chunk,
+ [isolate, heap](SlotType type, Address host_addr, Address slot) {
+ return UpdateTypedSlotHelper::UpdateTypedSlot(
+ isolate, type, slot, [heap](Object** slot) {
+ return CheckAndUpdateOldToNewSlot(
+ heap, reinterpret_cast<Address>(slot));
+ });
});
}
}
- static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
- MapWord map_word = object->map_word();
- // There could still be stale pointers in large object space, map space,
- // and old space for pages that have been promoted.
- if (map_word.IsForwardingAddress()) {
- // Update the corresponding slot.
- *address = map_word.ToForwardingAddress();
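+ // Updates an OLD_TO_NEW slot and decides the fate of its remembered-set
+ // entry: keep it only while the slot still points at a live to-space
+ // object, otherwise remove it.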
+ static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
+ Address slot_address) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (heap->InFromSpace(*slot)) {
+ HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
+ DCHECK(heap_object->IsHeapObject());
+ MapWord map_word = heap_object->map_word();
+ // There could still be stale pointers in large object space, map space,
+ // and old space for pages that have been promoted.
+ if (map_word.IsForwardingAddress()) {
+ // Update the corresponding slot.
+ *slot = map_word.ToForwardingAddress();
+ }
+ // If the object was in from space before executing the callback and is in
+ // to space afterwards, the object is still live.
+ // Unfortunately, we do not know about the slot. It could be in a
+ // just freed free space object.
+ if (heap->InToSpace(*slot)) {
+ return KEEP_SLOT;
+ }
+ } else if (heap->InToSpace(*slot)) {
+ DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
+ ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+ // Slots can be in "to" space after a page has been moved. Since there is
+ // no forwarding information present, we need to check the markbits to
+ // determine liveness.
+ if (Marking::IsBlack(
+ Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
+ return KEEP_SLOT;
+ } else {
+ DCHECK(!heap->InNewSpace(*slot));
}
+ return REMOVE_SLOT;
}
};
@@ -3642,10 +3714,9 @@
heap, heap->isolate()->cancelable_task_manager(), semaphore);
RememberedSet<direction>::IterateMemoryChunks(
heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
- PointersUpdatingVisitor visitor(heap);
int num_pages = job.NumberOfPages();
int num_tasks = NumberOfPointerUpdateTasks(num_pages);
- job.Run(num_tasks, [&visitor](int i) { return &visitor; });
+ job.Run(num_tasks, [](int i) { return 0; });
}
class ToSpacePointerUpdateJobTraits {
@@ -3655,6 +3726,24 @@
static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
MemoryChunk* chunk, PerPageData limits) {
+ if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
+ // New->new promoted pages contain garbage, so they require iteration
+ // using markbits.
+ ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
+ } else {
+ ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
+ }
+ return true;
+ }
+
+ static const bool NeedSequentialFinalization = false;
+ static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+ }
+
+ private:
+ static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk,
+ PerPageData limits) {
for (Address cur = limits.first; cur < limits.second;) {
HeapObject* object = HeapObject::FromAddress(cur);
Map* map = object->map();
@@ -3662,10 +3751,18 @@
object->IterateBody(map->instance_type(), size, visitor);
cur += size;
}
- return true;
}
- static const bool NeedSequentialFinalization = false;
- static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
+
+ static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
+ MemoryChunk* chunk,
+ PerPageData limits) {
+ LiveObjectIterator<kBlackObjects> it(chunk);
+ HeapObject* object = NULL;
+ while ((object = it.Next()) != NULL) {
+ Map* map = object->map();
+ int size = object->SizeFromMap(map);
+ object->IterateBody(map->instance_type(), size, visitor);
+ }
}
};
@@ -3674,15 +3771,13 @@
heap, heap->isolate()->cancelable_task_manager(), semaphore);
Address space_start = heap->new_space()->bottom();
Address space_end = heap->new_space()->top();
- NewSpacePageIterator it(space_start, space_end);
- while (it.has_next()) {
- Page* page = it.next();
+ for (Page* page : NewSpacePageRange(space_start, space_end)) {
Address start =
page->Contains(space_start) ? space_start : page->area_start();
Address end = page->Contains(space_end) ? space_end : page->area_end();
job.AddPage(page, std::make_pair(start, end));
}
- PointersUpdatingVisitor visitor(heap);
+ PointersUpdatingVisitor visitor;
int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}
@@ -3690,7 +3785,7 @@
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
- PointersUpdatingVisitor updating_visitor(heap());
+ PointersUpdatingVisitor updating_visitor;
{
TRACE_GC(heap()->tracer(),
@@ -3741,7 +3836,7 @@
int pages_freed = 0;
Page* page = nullptr;
while ((page = GetSweepingPageSafe(identity)) != nullptr) {
- int freed = ParallelSweepPage(page, heap_->paged_space(identity));
+ int freed = ParallelSweepPage(page, identity);
pages_freed += 1;
DCHECK_GE(freed, 0);
max_freed = Max(max_freed, freed);
@@ -3753,7 +3848,7 @@
}
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
- PagedSpace* space) {
+ AllocationSpace identity) {
int max_freed = 0;
if (page->mutex()->TryLock()) {
// If this page was already swept in the meantime, we can return here.
@@ -3762,19 +3857,25 @@
return 0;
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
- if (space->identity() == OLD_SPACE) {
+ if (identity == NEW_SPACE) {
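+ // New-space pages are swept without an owning paged space; dead regions
+ // become filler objects instead of free-list entries.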
+ RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+ IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
+ } else if (identity == OLD_SPACE) {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
- } else if (space->identity() == CODE_SPACE) {
+ REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+ heap_->paged_space(identity), page, nullptr);
+ } else if (identity == CODE_SPACE) {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
+ REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+ heap_->paged_space(identity), page, nullptr);
} else {
max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
- IGNORE_FREE_SPACE>(space, page, NULL);
+ REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
+ heap_->paged_space(identity), page, nullptr);
}
{
base::LockGuard<base::Mutex> guard(&mutex_);
- swept_list_[space->identity()].Add(page);
+ swept_list_[identity].Add(page);
}
page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
page->mutex()->Unlock();
@@ -3800,7 +3901,8 @@
Page* page) {
page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
int to_sweep = page->area_size() - page->LiveBytes();
- heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
+ if (space != NEW_SPACE)
+ heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}
Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
@@ -3821,15 +3923,15 @@
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
+ Address space_top = space->top();
space->ClearStats();
- PageIterator it(space);
-
int will_be_swept = 0;
bool unused_page_present = false;
- while (it.has_next()) {
- Page* p = it.next();
+ // Loop needs to support deletion if live bytes == 0 for a page.
+ for (auto it = space->begin(); it != space->end();) {
+ Page* p = *(it++);
DCHECK(p->SweepingDone());
if (p->IsEvacuationCandidate()) {
@@ -3844,7 +3946,15 @@
Bitmap::Clear(p);
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
p->ClearFlag(Page::BLACK_PAGE);
- // TODO(hpayer): Free unused memory of last black page.
+ // Area above the high watermark is free.
+ Address free_start = p->HighWaterMark();
+ // Check if the space top was in this page, which means that the
+ // high watermark is not up-to-date.
+ if (free_start < space_top && space_top <= p->area_end()) {
+ free_start = space_top;
+ }
+ int size = static_cast<int>(p->area_end() - free_start);
+ space->Free(free_start, size);
continue;
}
@@ -3855,8 +3965,8 @@
// testing this is fine.
p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
- Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
- space, p, nullptr);
+ Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
+ Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
continue;
}
@@ -3864,8 +3974,10 @@
if (p->LiveBytes() == 0) {
if (unused_page_present) {
if (FLAG_gc_verbose) {
- PrintIsolate(isolate(), "sweeping: released page: %p", p);
+ PrintIsolate(isolate(), "sweeping: released page: %p",
+ static_cast<void*>(p));
}
+ ArrayBufferTracker::FreeAll(p);
space->ReleasePage(p);
continue;
}
@@ -3936,7 +4048,10 @@
Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
if (target_page->IsEvacuationCandidate() &&
!ShouldSkipEvacuationSlotRecording(host)) {
- RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
+ // TODO(ulan): remove this check after investigating crbug.com/414964.
+ CHECK(target->IsCode());
+ RememberedSet<OLD_TO_OLD>::InsertTyped(
+ source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
}
}