// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/utils-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";


// The following has to hold in order for {Marking::MarkBitFrom} to not produce
// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
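// Illustrative sketch (not actual V8 code; the real mapping lives in
// Marking::MarkBitFrom): assuming one mark bit per pointer-sized word, with
// an object's color encoded in the two consecutive bits starting at its own
// word, e.g.
//
//   // Hypothetical helper, name invented for illustration.
//   size_t FirstMarkBitIndex(Address addr, Address chunk_start) {
//     return (addr - chunk_start) / kPointerSize;  // one bit per word
//   }
//
// an object starting at word i uses bits {i, i + 1}. The assert above makes
// every object at least two words long, so the next object starts at word
// i + 2 or later and neighboring bit pairs can never overlap into the "01"
// (impossible) pattern.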
// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    :  // NOLINT
      heap_(heap),
      page_parallel_job_semaphore_(0),
#ifdef DEBUG
      state_(IDLE),
#endif
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      evacuation_(false),
      compacting_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(0),
      code_flusher_(nullptr),
      embedder_heap_tracer_(nullptr),
      sweeper_(heap) {
}

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) override {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
      // The next word for sure belongs to the current object, jump over it.
      current += kPointerSize;
    }
  }
}

static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
  CHECK(page->IsFlagSet(Page::BLACK_PAGE));
  VerifyMarkingVisitor visitor(heap);
  HeapObjectIterator it(page);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    object->Iterate(&visitor);
  }
}

static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());

  NewSpacePageRange range(space->bottom(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      VerifyMarkingBlackPage(space->heap(), p);
    } else {
      VerifyMarking(space->heap(), p->area_start(), p->area_end());
    }
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  VerifyEvacuationVisitor visitor;
  NewSpacePageRange range(space->bottom(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
    return;
  }
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
      PrintF("[code-flushing is now on]\n");
    }
  }
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
  delete marking_deque_memory_;
  delete code_flusher_;
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}

void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
  }
// There is no need to filter the old-to-old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
  }
#endif
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  EvacuateNewSpaceAndCandidates();

  Finish();
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  for (Page* p : *space) {
    Bitmap::Clear(p);
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      p->ClearFlag(Page::BLACK_PAGE);
    }
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  for (Page* page : *space) {
    Bitmap::Clear(page);
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Marking::MarkWhite(Marking::MarkBitFrom(obj));
    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    chunk->ResetProgressBar();
    chunk->ResetLiveBytes();
    if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
      chunk->ClearFlag(Page::BLACK_PAGE);
    }
  }
}

class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
 public:
  SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
              AllocationSpace space_to_start)
      : sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    DCHECK_GE(space_to_start_, FIRST_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
    }
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* sweeper_;
  base::Semaphore* pending_sweeper_tasks_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};
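// Illustrative walk-through of SweeperTask::Run (hypothetical values, and
// assuming the usual AllocationSpace ordering NEW < OLD < CODE < MAP): with
// num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1 = 4 and space_to_start_ ==
// CODE_SPACE, offset is 2 and the loop sweeps CODE_SPACE, MAP_SPACE,
// NEW_SPACE and OLD_SPACE in that order. Starting each task at a different
// space spreads concurrent sweepers across the spaces instead of having them
// all contend for the same sweeping list first.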
void MarkCompactCollector::Sweeper::StartSweeping() {
  sweeping_in_progress_ = true;
  ForAllSweepingSpaces([this](AllocationSpace space) {
    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
              [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
  });
  if (FLAG_concurrent_sweeping) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      if (space == NEW_SPACE) return;
      StartSweepingHelper(space);
    });
  }
}

void MarkCompactCollector::Sweeper::StartSweepingHelper(
    AllocationSpace space_to_start) {
  num_sweeping_tasks_.Increment(1);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
      v8::Platform::kShortRunningTask);
}

void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
    Page* page) {
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, page->owner()->identity());
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}

void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
    sweeper().ParallelSweepSpace(space->identity(), 0);
    space->RefillFreeList();
  }
}

Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  SweptList& list = swept_list_[space->identity()];
  if (list.length() > 0) {
    return list.RemoveLast();
  }
  return nullptr;
}

void MarkCompactCollector::Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    ForAllSweepingSpaces(
        [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
  }

  if (FLAG_concurrent_sweeping) {
    while (num_sweeping_tasks_.Value() > 0) {
      pending_sweeper_tasks_semaphore_.Wait();
      num_sweeping_tasks_.Increment(-1);
    }
  }

  ForAllSweepingSpaces([this](AllocationSpace space) {
    if (space == NEW_SPACE) {
      swept_list_[NEW_SPACE].Clear();
    }
    DCHECK(sweeping_list_[space].empty());
  });
  late_pages_ = false;
  sweeping_in_progress_ = false;
}

void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
  if (!sweeping_in_progress_) return;
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    for (Page* p : *heap_->new_space()) {
      SweepOrWaitUntilSweepingCompleted(p);
    }
  }
}

void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper().sweeping_in_progress()) return;

  sweeper().EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    VerifyEvacuation(heap_);
  }
#endif
}

bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
  while (pending_sweeper_tasks_semaphore_.WaitFor(
      base::TimeDelta::FromSeconds(0))) {
    num_sweeping_tasks_.Increment(-1);
  }
  return num_sweeping_tasks_.Value() == 0;
}

void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking() ||
      Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
    return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


void MarkCompactCollector::ComputeEvacuationHeuristics(
    int area_size, int* target_fragmentation_percent,
    int* max_evacuated_bytes) {
  // For memory reducing mode we directly define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const int kTargetMsPerArea = 1;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}
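// Worked example (hypothetical numbers, for illustration only): with
// area_size = 500,000 bytes and a traced compaction speed of 1,000,000
// bytes/ms, estimated_ms_per_area = 1 + 500,000 / 1,000,000 = 1.5 ms, so
// target_fragmentation_percent = static_cast<int>(100 - 100 * 1 / 1.5) = 33,
// which is above the kTargetFragmentationPercentForReduceMemory floor of 20
// and is therefore kept. CollectEvacuationCandidates below then derives a
// free-bytes threshold of 33 * (500,000 / 100) = 165,000 bytes: only pages
// with at least that much free space (and that still fit into the
// max_evacuated_bytes quota) become evacuation candidates.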
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  int area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<int, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  for (Page* p : *space) {
    if (p->NeverEvacuate()) continue;
    if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->old_to_old_slots());
    CHECK_NULL(p->typed_old_to_old_slots());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
  }

  int candidate_count = 0;
  int total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an evacuation
    // candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., what is the
    //   ratio between live bytes and capacity of this page (= area).
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to the
    // set of evacuation candidates as long as both conditions (fragmentation
    // and quota) hold.
    int max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const intptr_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      int live_bytes = pages[i].first;
      int free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%d "
                     "fragmentation_limit_kb=%" V8PRIdPTR
                     " fragmentation_limit_percent=%d sum_compaction_kb=%d "
                     "compaction_limit_kb=%d\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%d\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();
  }

  // If concurrent unmapping tasks are still running, we should wait for
  // them here.
  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    ClearMarkbits();
    AbortWeakCollections();
    AbortWeakCells();
    AbortTransitionArrays();
    AbortCompaction();
    if (heap_->UsingEmbedderHeapTracer()) {
      heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
    }
    was_marked_incrementally_ = false;
  }

  if (!was_marked_incrementally_) {
    if (heap_->UsingEmbedderHeapTracer()) {
      heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
    }
  }

  if (UsingEmbedderHeapTracer()) {
    embedder_heap_tracer()->EnterFinalPause();
  }

  // Don't start compaction if we are in the middle of incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }
  heap()->account_external_memory_concurrently_freed();

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
    // If we added some more pages during MC, we need to start at least one
    // more task as all other tasks might already be finished.
    sweeper().StartSweepingHelper(OLD_SPACE);
  }

  // The hashing of weak_object_to_code_table is no longer valid.
  heap()->weak_object_to_code_table()->Rehash(
      heap()->isolate()->factory()->undefined_value());

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!shared->OptimizedCodeMapIsCleared()) {
        shared->ClearOptimizedCodeMap();
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      DCHECK(Marking::IsBlack(code_mark));
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
        candidate, slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(
        shared, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!candidate->OptimizedCodeMapIsCleared()) {
        candidate->ClearOptimizedCodeMap();
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
                                                           *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined(isolate_));
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->IterateBlackObject(function);
  isolate_->heap()->incremental_marking()->IterateBlackObject(
      function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), object, p);
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, object, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, object, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (Marking::IsWhite(mark_bit)) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         HeapObject* object, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* target_object = HeapObject::cast(*p);
    collector->RecordSlot(object, p, target_object);
    MarkBit mark = Marking::MarkBitFrom(target_object);
    collector->MarkObject(target_object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
                                          Object** start, Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(object, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (Marking::IsBlackOrGrey(mark)) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
1250 FixedArray* data = FixedArray::cast(re->data());
Ben Murdochda12d292016-06-02 14:46:10 +01001251 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
1252 Object** slot =
1253 data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1254 heap->mark_compact_collector()->RecordSlot(data, slot, code);
1255 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001256
1257 // Set a number in the 0-255 range to guarantee no smi overflow.
1258 re->SetDataAt(JSRegExp::code_index(is_one_byte),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001259 Smi::FromInt(heap->ms_count() & 0xff));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001260 } else if (code->IsSmi()) {
1261 int value = Smi::cast(code)->value();
1262 // The regexp has not been compiled yet or there was a compilation error.
1263 if (value == JSRegExp::kUninitializedValue ||
1264 value == JSRegExp::kCompilationErrorValue) {
1265 return;
1266 }
1267
1268 // Check if we should flush now.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001269 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001270 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1271 Smi::FromInt(JSRegExp::kUninitializedValue));
1272 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1273 Smi::FromInt(JSRegExp::kUninitializedValue));
1274 }
1275 }
1276 }
1277
1278
1279  // Works by setting the current mark-sweep count (truncated to 0-255, as a
1280  // smi) in the code object slot of the RegExp's data array and keeping a
1281  // copy around that can be reinstated if we reuse the RegExp before
1282  // flushing. If we did not use the code for kRegExpCodeThreshold mark-sweep
1283  // GCs, we flush the code.
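  // For example, with kRegExpCodeThreshold == 5: a RegExp whose code slot was
  // stamped with (ms_count & 0xff) == 42 is flushed during the collection
  // where ((ms_count - 5) & 0xff) == 42, i.e. five mark-sweep GCs later,
  // unless the RegExp was used in the meantime, which reinstates the saved
  // code and refreshes the stamp at the following GC.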
1284 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1285 Heap* heap = map->GetHeap();
1286 MarkCompactCollector* collector = heap->mark_compact_collector();
1287 if (!collector->is_code_flushing_enabled()) {
1288 VisitJSRegExp(map, object);
1289 return;
1290 }
1291 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1292    // Flush code or set age on both one-byte and two-byte code.
1293 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1294 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1295 // Visit the fields of the RegExp, including the updated FixedArray.
1296 VisitJSRegExp(map, object);
1297 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001298};
1299
1300
1301void MarkCompactMarkingVisitor::Initialize() {
1302 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1303
1304 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1305
1306 if (FLAG_track_gc_object_stats) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001307 MarkCompactObjectStatsVisitor::Initialize(&table_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001308 }
1309}
1310
1311
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001312class CodeMarkingVisitor : public ThreadVisitor {
1313 public:
1314 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1315 : collector_(collector) {}
1316
1317 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1318 collector_->PrepareThreadForCodeFlushing(isolate, top);
1319 }
1320
1321 private:
1322 MarkCompactCollector* collector_;
1323};
1324
1325
1326class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1327 public:
1328 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1329 : collector_(collector) {}
1330
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001331 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001332 for (Object** p = start; p < end; p++) VisitPointer(p);
1333 }
1334
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001335 void VisitPointer(Object** slot) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001336 Object* obj = *slot;
1337 if (obj->IsSharedFunctionInfo()) {
1338 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1339 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1340 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1341 collector_->MarkObject(shared->code(), code_mark);
1342 collector_->MarkObject(shared, shared_mark);
1343 }
1344 }
1345
1346 private:
1347 MarkCompactCollector* collector_;
1348};
1349
1350
1351void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1352 ThreadLocalTop* top) {
1353 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1354    // Note: for a frame that has a pending lazy deoptimization,
1355    // StackFrame::unchecked_code will return a non-optimized code object for
1356    // the outermost function, while StackFrame::LookupCode will return the
1357    // actual optimized code object.
1358 StackFrame* frame = it.frame();
1359 Code* code = frame->unchecked_code();
1360 MarkBit code_mark = Marking::MarkBitFrom(code);
1361 MarkObject(code, code_mark);
1362 if (frame->is_optimized()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001363 Code* optimized_code = frame->LookupCode();
1364 MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1365 MarkObject(optimized_code, optimized_code_mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001366 }
1367 }
1368}
1369
1370
1371void MarkCompactCollector::PrepareForCodeFlushing() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001372 // If code flushing is disabled, there is no need to prepare for it.
1373 if (!is_code_flushing_enabled()) return;
1374
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001375 // Make sure we are not referencing the code from the stack.
1376 DCHECK(this == heap()->mark_compact_collector());
1377 PrepareThreadForCodeFlushing(heap()->isolate(),
1378 heap()->isolate()->thread_local_top());
1379
1380 // Iterate the archived stacks in all threads to check if
1381 // the code is referenced.
1382 CodeMarkingVisitor code_marking_visitor(this);
1383 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1384 &code_marking_visitor);
1385
1386 SharedFunctionInfoMarkingVisitor visitor(this);
1387 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1388 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1389
1390 ProcessMarkingDeque();
1391}
1392
1393
1394// Visitor class for marking heap roots.
1395class RootMarkingVisitor : public ObjectVisitor {
1396 public:
1397 explicit RootMarkingVisitor(Heap* heap)
1398 : collector_(heap->mark_compact_collector()) {}
1399
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001400 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001401
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001402 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001403 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1404 }
1405
1406 // Skip the weak next code link in a code object, which is visited in
1407 // ProcessTopOptimizedFrame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001408 void VisitNextCodeLink(Object** p) override {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001409
1410 private:
1411 void MarkObjectByPointer(Object** p) {
1412 if (!(*p)->IsHeapObject()) return;
1413
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001414 HeapObject* object = HeapObject::cast(*p);
Ben Murdochc5610432016-08-08 18:44:38 +01001415
Ben Murdoch61f157c2016-09-16 13:49:30 +01001416 if (collector_->heap()->PurgeLeftTrimmedObject(p)) return;
1417
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001418 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001419 if (Marking::IsBlackOrGrey(mark_bit)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001420
1421 Map* map = object->map();
1422 // Mark the object.
1423 collector_->SetMark(object, mark_bit);
1424
1425 // Mark the map pointer and body, and push them on the marking stack.
1426 MarkBit map_mark = Marking::MarkBitFrom(map);
1427 collector_->MarkObject(map, map_mark);
1428 MarkCompactMarkingVisitor::IterateBody(map, object);
1429
1430 // Mark all the objects reachable from the map and body. May leave
1431 // overflowed objects in the heap.
1432 collector_->EmptyMarkingDeque();
1433 }
1434
1435 MarkCompactCollector* collector_;
1436};
1437
1438
1439// Helper class for pruning the string table.
Ben Murdochda12d292016-06-02 14:46:10 +01001440template <bool finalize_external_strings, bool record_slots>
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001441class StringTableCleaner : public ObjectVisitor {
1442 public:
Ben Murdochda12d292016-06-02 14:46:10 +01001443 StringTableCleaner(Heap* heap, HeapObject* table)
1444 : heap_(heap), pointers_removed_(0), table_(table) {
1445 DCHECK(!record_slots || table != nullptr);
1446 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001447
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001448 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001449 // Visit all HeapObject pointers in [start, end).
Ben Murdochda12d292016-06-02 14:46:10 +01001450 MarkCompactCollector* collector = heap_->mark_compact_collector();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001451 for (Object** p = start; p < end; p++) {
1452 Object* o = *p;
Ben Murdochda12d292016-06-02 14:46:10 +01001453 if (o->IsHeapObject()) {
1454 if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1455 if (finalize_external_strings) {
1456 DCHECK(o->IsExternalString());
1457 heap_->FinalizeExternalString(String::cast(*p));
1458 } else {
1459 pointers_removed_++;
1460 }
1461 // Set the entry to the_hole_value (as deleted).
1462 *p = heap_->the_hole_value();
1463 } else if (record_slots) {
1464 // StringTable contains only old space strings.
1465 DCHECK(!heap_->InNewSpace(o));
1466 collector->RecordSlot(table_, p, o);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001467 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001468 }
1469 }
1470 }
1471
1472 int PointersRemoved() {
1473 DCHECK(!finalize_external_strings);
1474 return pointers_removed_;
1475 }
1476
1477 private:
1478 Heap* heap_;
1479 int pointers_removed_;
Ben Murdochda12d292016-06-02 14:46:10 +01001480 HeapObject* table_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001481};
1482
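// The two instantiations below: the internalized string table cleaner keeps a
// reference to its table so it can record slots for surviving entries, while
// the external string table cleaner finalizes dead external strings and needs
// no slot recording.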
Ben Murdochda12d292016-06-02 14:46:10 +01001483typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1484typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001485
1486// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1487// are retained.
1488class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1489 public:
1490 virtual Object* RetainAs(Object* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001491 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1492 DCHECK(!Marking::IsGrey(mark_bit));
1493 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001494 return object;
1495 } else if (object->IsAllocationSite() &&
1496 !(AllocationSite::cast(object)->IsZombie())) {
1497 // "dead" AllocationSites need to live long enough for a traversal of new
1498 // space. These sites get a one-time reprieve.
1499 AllocationSite* site = AllocationSite::cast(object);
1500 site->MarkZombie();
1501 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1502 return object;
1503 } else {
1504 return NULL;
1505 }
1506 }
1507};
1508
1509
1510// Fill the marking stack with overflowed objects returned by the given
1511// iterator. Stop when the marking stack is filled or the end of the space
1512// is reached, whichever comes first.
1513template <class T>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001514void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001515 // The caller should ensure that the marking stack is initially not full,
1516 // so that we don't waste effort pointlessly scanning for objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001517 DCHECK(!marking_deque()->IsFull());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001518
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001519 Map* filler_map = heap()->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001520 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1521 MarkBit markbit = Marking::MarkBitFrom(object);
1522 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1523 Marking::GreyToBlack(markbit);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001524 PushBlack(object);
1525 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001526 }
1527 }
1528}
1529
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001530void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1531 DCHECK(!marking_deque()->IsFull());
1532 LiveObjectIterator<kGreyObjects> it(p);
1533 HeapObject* object = NULL;
1534 while ((object = it.Next()) != NULL) {
1535 MarkBit markbit = Marking::MarkBitFrom(object);
1536 DCHECK(Marking::IsGrey(markbit));
1537 Marking::GreyToBlack(markbit);
1538 PushBlack(object);
1539 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001540 }
1541}
1542
Ben Murdochda12d292016-06-02 14:46:10 +01001543class RecordMigratedSlotVisitor final : public ObjectVisitor {
1544 public:
Ben Murdoch61f157c2016-09-16 13:49:30 +01001545 explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1546 : collector_(collector) {}
1547
Ben Murdochda12d292016-06-02 14:46:10 +01001548 inline void VisitPointer(Object** p) final {
1549 RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1550 }
1551
1552 inline void VisitPointers(Object** start, Object** end) final {
1553 while (start < end) {
1554 RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1555 ++start;
1556 }
1557 }
1558
1559 inline void VisitCodeEntry(Address code_entry_slot) final {
1560 Address code_entry = Memory::Address_at(code_entry_slot);
1561 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1562 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
Ben Murdoch61f157c2016-09-16 13:49:30 +01001563 nullptr, CODE_ENTRY_SLOT,
1564 code_entry_slot);
Ben Murdochda12d292016-06-02 14:46:10 +01001565 }
1566 }
1567
Ben Murdoch61f157c2016-09-16 13:49:30 +01001568 inline void VisitCodeTarget(RelocInfo* rinfo) final {
1569 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
1570 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1571 Code* host = rinfo->host();
1572 collector_->RecordRelocSlot(host, rinfo, target);
1573 }
1574
1575 inline void VisitDebugTarget(RelocInfo* rinfo) final {
1576 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1577 rinfo->IsPatchedDebugBreakSlotSequence());
1578 Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
1579 Code* host = rinfo->host();
1580 collector_->RecordRelocSlot(host, rinfo, target);
1581 }
1582
1583 inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
1584 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1585 HeapObject* object = HeapObject::cast(rinfo->target_object());
1586 Code* host = rinfo->host();
1587 collector_->RecordRelocSlot(host, rinfo, object);
1588 }
1589
1590 inline void VisitCell(RelocInfo* rinfo) final {
1591 DCHECK(rinfo->rmode() == RelocInfo::CELL);
1592 Cell* cell = rinfo->target_cell();
1593 Code* host = rinfo->host();
1594 collector_->RecordRelocSlot(host, rinfo, cell);
1595 }
1596
1597 // Entries that will never move.
1598 inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
1599 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
1600 Code* stub = rinfo->code_age_stub();
1601 USE(stub);
1602 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
1603 }
1604
1605 // Entries that are skipped for recording.
1606 inline void VisitExternalReference(RelocInfo* rinfo) final {}
1607 inline void VisitExternalReference(Address* p) final {}
1608 inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
1609 inline void VisitExternalOneByteString(
1610 v8::String::ExternalOneByteStringResource** resource) final {}
1611 inline void VisitExternalTwoByteString(
1612 v8::String::ExternalStringResource** resource) final {}
1613 inline void VisitInternalReference(RelocInfo* rinfo) final {}
1614 inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
1615
Ben Murdochda12d292016-06-02 14:46:10 +01001616 private:
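  // Record a slot in the appropriate remembered set: a slot whose value is in
  // new space goes into OLD_TO_NEW, a slot whose value lives on an evacuation
  // candidate page goes into OLD_TO_OLD; other pointers need no recording.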
1617 inline void RecordMigratedSlot(Object* value, Address slot) {
1618 if (value->IsHeapObject()) {
1619 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1620 if (p->InNewSpace()) {
1621 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1622 } else if (p->IsEvacuationCandidate()) {
1623 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1624 }
1625 }
1626 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01001627
1628 MarkCompactCollector* collector_;
Ben Murdochda12d292016-06-02 14:46:10 +01001629};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001630
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001631class MarkCompactCollector::HeapObjectVisitor {
1632 public:
1633 virtual ~HeapObjectVisitor() {}
1634 virtual bool Visit(HeapObject* object) = 0;
1635};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001636
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001637class MarkCompactCollector::EvacuateVisitorBase
1638 : public MarkCompactCollector::HeapObjectVisitor {
Ben Murdochda12d292016-06-02 14:46:10 +01001639 protected:
1640 enum MigrationMode { kFast, kProfiled };
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001641
Ben Murdochda12d292016-06-02 14:46:10 +01001642 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1643 : heap_(heap),
1644 compaction_spaces_(compaction_spaces),
1645 profiling_(
Ben Murdoch61f157c2016-09-16 13:49:30 +01001646 heap->isolate()->is_profiling() ||
Ben Murdochda12d292016-06-02 14:46:10 +01001647 heap->isolate()->logger()->is_logging_code_events() ||
1648 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1649
1650 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1651 HeapObject** target_object) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001652#ifdef VERIFY_HEAP
1653 if (AbortCompactionForTesting(object)) return false;
1654#endif // VERIFY_HEAP
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001655 int size = object->Size();
1656 AllocationAlignment alignment = object->RequiredAlignment();
1657 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1658 if (allocation.To(target_object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01001659 MigrateObject(*target_object, object, size, target_space->identity());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001660 return true;
1661 }
1662 return false;
1663 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001664
Ben Murdochda12d292016-06-02 14:46:10 +01001665 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1666 AllocationSpace dest) {
1667 if (profiling_) {
1668 MigrateObject<kProfiled>(dst, src, size, dest);
1669 } else {
1670 MigrateObject<kFast>(dst, src, size, dest);
1671 }
1672 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001673
Ben Murdochda12d292016-06-02 14:46:10 +01001674 template <MigrationMode mode>
1675 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1676 AllocationSpace dest) {
1677 Address dst_addr = dst->address();
1678 Address src_addr = src->address();
1679 DCHECK(heap_->AllowedToBeMigrated(src, dest));
1680 DCHECK(dest != LO_SPACE);
1681 if (dest == OLD_SPACE) {
1682 DCHECK_OBJECT_SIZE(size);
1683 DCHECK(IsAligned(size, kPointerSize));
1684 heap_->CopyBlock(dst_addr, src_addr, size);
1685 if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
1686 PROFILE(heap_->isolate(),
1687 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1688 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01001689 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
Ben Murdochda12d292016-06-02 14:46:10 +01001690 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1691 } else if (dest == CODE_SPACE) {
1692 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1693 if (mode == kProfiled) {
1694 PROFILE(heap_->isolate(),
1695 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1696 }
1697 heap_->CopyBlock(dst_addr, src_addr, size);
Ben Murdochda12d292016-06-02 14:46:10 +01001698 Code::cast(dst)->Relocate(dst_addr - src_addr);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001699 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1700 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
Ben Murdochda12d292016-06-02 14:46:10 +01001701 } else {
1702 DCHECK_OBJECT_SIZE(size);
1703 DCHECK(dest == NEW_SPACE);
1704 heap_->CopyBlock(dst_addr, src_addr, size);
1705 }
1706 if (mode == kProfiled) {
1707 heap_->OnMoveEvent(dst, src, size);
1708 }
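    // Leave a forwarding address in the first word (the map word) of the old
    // object so that later visitors can find the new location.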
1709 Memory::Address_at(src_addr) = dst_addr;
1710 }
1711
Ben Murdoch61f157c2016-09-16 13:49:30 +01001712#ifdef VERIFY_HEAP
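  // With --stress-compaction, abort evacuation for a pseudo-random subset of
  // objects: an object whose in-page offset matches a mask derived from
  // --random-seed triggers an abort, with a per-page flag toggled so that the
  // abort only happens every other time the page is hit.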
1713 bool AbortCompactionForTesting(HeapObject* object) {
1714 if (FLAG_stress_compaction) {
1715 const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1716 Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1717 if ((reinterpret_cast<uintptr_t>(object->address()) &
1718 Page::kPageAlignmentMask) == mask) {
1719 Page* page = Page::FromAddress(object->address());
1720 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1721 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1722 } else {
1723 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1724 return true;
1725 }
1726 }
1727 }
1728 return false;
1729 }
1730#endif // VERIFY_HEAP
1731
Ben Murdochda12d292016-06-02 14:46:10 +01001732 Heap* heap_;
1733 CompactionSpaceCollection* compaction_spaces_;
1734 bool profiling_;
1735};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001736
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001737class MarkCompactCollector::EvacuateNewSpaceVisitor final
1738 : public MarkCompactCollector::EvacuateVisitorBase {
1739 public:
1740 static const intptr_t kLabSize = 4 * KB;
1741 static const intptr_t kMaxLabObjectSize = 256;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001742
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001743 explicit EvacuateNewSpaceVisitor(Heap* heap,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001744 CompactionSpaceCollection* compaction_spaces,
Ben Murdoch61f157c2016-09-16 13:49:30 +01001745 base::HashMap* local_pretenuring_feedback)
Ben Murdochda12d292016-06-02 14:46:10 +01001746 : EvacuateVisitorBase(heap, compaction_spaces),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001747 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1748 space_to_allocate_(NEW_SPACE),
1749 promoted_size_(0),
1750 semispace_copied_size_(0),
1751 local_pretenuring_feedback_(local_pretenuring_feedback) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001752
Ben Murdochc5610432016-08-08 18:44:38 +01001753 inline bool Visit(HeapObject* object) override {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001754 heap_->UpdateAllocationSite<Heap::kCached>(object,
1755 local_pretenuring_feedback_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001756 int size = object->Size();
1757 HeapObject* target_object = nullptr;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001758 if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
Ben Murdoch097c5b22016-05-18 11:27:45 +01001759 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1760 &target_object)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001761 promoted_size_ += size;
1762 return true;
1763 }
1764 HeapObject* target = nullptr;
1765 AllocationSpace space = AllocateTargetObject(object, &target);
Ben Murdochda12d292016-06-02 14:46:10 +01001766 MigrateObject(HeapObject::cast(target), object, size, space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001767 semispace_copied_size_ += size;
1768 return true;
1769 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001770
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001771 intptr_t promoted_size() { return promoted_size_; }
1772 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1773
1774 private:
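  // Controls what happens when a new-space allocation fails: in sticky mode
  // all subsequent objects are redirected to old space, while non-sticky mode
  // only affects the current allocation.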
1775 enum NewSpaceAllocationMode {
1776 kNonstickyBailoutOldSpace,
1777 kStickyBailoutOldSpace,
1778 };
1779
1780 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1781 HeapObject** target_object) {
1782 const int size = old_object->Size();
1783 AllocationAlignment alignment = old_object->RequiredAlignment();
1784 AllocationResult allocation;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001785 AllocationSpace space_allocated_in = space_to_allocate_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001786 if (space_to_allocate_ == NEW_SPACE) {
1787 if (size > kMaxLabObjectSize) {
1788 allocation =
1789 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1790 } else {
1791 allocation = AllocateInLab(size, alignment);
1792 }
1793 }
1794 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1795 allocation = AllocateInOldSpace(size, alignment);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001796 space_allocated_in = OLD_SPACE;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001797 }
1798 bool ok = allocation.To(target_object);
1799 DCHECK(ok);
1800 USE(ok);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001801 return space_allocated_in;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001802 }
1803
1804 inline bool NewLocalAllocationBuffer() {
1805 AllocationResult result =
1806 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1807 LocalAllocationBuffer saved_old_buffer = buffer_;
1808 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1809 if (buffer_.IsValid()) {
1810 buffer_.TryMerge(&saved_old_buffer);
1811 return true;
1812 }
1813 return false;
1814 }
1815
1816 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1817 AllocationAlignment alignment,
1818 NewSpaceAllocationMode mode) {
1819 AllocationResult allocation =
1820 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1821 if (allocation.IsRetry()) {
1822 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1823 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1824 } else {
1825 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1826 alignment);
1827 if (allocation.IsRetry()) {
1828 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001829 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001830 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001831 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001832 return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001833 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001834
1835 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1836 AllocationAlignment alignment) {
1837 AllocationResult allocation =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001838 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1839 alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001840 if (allocation.IsRetry()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001841 v8::internal::Heap::FatalProcessOutOfMemory(
1842 "MarkCompactCollector: semi-space copy, fallback in old gen", true);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001843 }
1844 return allocation;
1845 }
1846
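  // Allocate from the current LAB; if it is exhausted, try to set up a fresh
  // LAB and retry once. Failing that, fall back to old space for this and all
  // subsequent allocations.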
1847 inline AllocationResult AllocateInLab(int size_in_bytes,
1848 AllocationAlignment alignment) {
1849 AllocationResult allocation;
1850 if (!buffer_.IsValid()) {
1851 if (!NewLocalAllocationBuffer()) {
1852 space_to_allocate_ = OLD_SPACE;
1853 return AllocationResult::Retry(OLD_SPACE);
1854 }
1855 }
1856 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1857 if (allocation.IsRetry()) {
1858 if (!NewLocalAllocationBuffer()) {
1859 space_to_allocate_ = OLD_SPACE;
1860 return AllocationResult::Retry(OLD_SPACE);
1861 } else {
1862 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1863 if (allocation.IsRetry()) {
1864 space_to_allocate_ = OLD_SPACE;
1865 return AllocationResult::Retry(OLD_SPACE);
1866 }
1867 }
1868 }
1869 return allocation;
1870 }
1871
1872 LocalAllocationBuffer buffer_;
1873 AllocationSpace space_to_allocate_;
1874 intptr_t promoted_size_;
1875 intptr_t semispace_copied_size_;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001876 base::HashMap* local_pretenuring_feedback_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001877};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001878
Ben Murdochc5610432016-08-08 18:44:38 +01001879class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1880 : public MarkCompactCollector::HeapObjectVisitor {
1881 public:
Ben Murdoch61f157c2016-09-16 13:49:30 +01001882 explicit EvacuateNewSpacePageVisitor(Heap* heap)
1883 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
Ben Murdochc5610432016-08-08 18:44:38 +01001884
Ben Murdoch61f157c2016-09-16 13:49:30 +01001885 static void MoveToOldSpace(Page* page, PagedSpace* owner) {
1886 page->Unlink();
1887 Page* new_page = Page::ConvertNewToOld(page, owner);
1888 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1889 }
1890
1891 static void MoveToToSpace(Page* page) {
1892 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1893 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
Ben Murdochc5610432016-08-08 18:44:38 +01001894 }
1895
1896 inline bool Visit(HeapObject* object) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001897 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
Ben Murdochc5610432016-08-08 18:44:38 +01001898 object->IterateBodyFast(&visitor);
1899 promoted_size_ += object->Size();
1900 return true;
1901 }
1902
1903 intptr_t promoted_size() { return promoted_size_; }
Ben Murdoch61f157c2016-09-16 13:49:30 +01001904 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1905
1906 void account_semispace_copied(intptr_t copied) {
1907 semispace_copied_size_ += copied;
1908 }
Ben Murdochc5610432016-08-08 18:44:38 +01001909
1910 private:
Ben Murdoch61f157c2016-09-16 13:49:30 +01001911 Heap* heap_;
Ben Murdochc5610432016-08-08 18:44:38 +01001912 intptr_t promoted_size_;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001913 intptr_t semispace_copied_size_;
Ben Murdochc5610432016-08-08 18:44:38 +01001914};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001916class MarkCompactCollector::EvacuateOldSpaceVisitor final
1917 : public MarkCompactCollector::EvacuateVisitorBase {
1918 public:
1919 EvacuateOldSpaceVisitor(Heap* heap,
Ben Murdochda12d292016-06-02 14:46:10 +01001920 CompactionSpaceCollection* compaction_spaces)
1921 : EvacuateVisitorBase(heap, compaction_spaces) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001922
Ben Murdochc5610432016-08-08 18:44:38 +01001923 inline bool Visit(HeapObject* object) override {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001924 CompactionSpace* target_space = compaction_spaces_->Get(
1925 Page::FromAddress(object->address())->owner()->identity());
1926 HeapObject* target_object = nullptr;
1927 if (TryEvacuateObject(target_space, object, &target_object)) {
1928 DCHECK(object->map_word().IsForwardingAddress());
1929 return true;
1930 }
1931 return false;
1932 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001933};
1934
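// Visitor that does not move objects but only re-records their outgoing slots,
// used when objects stay in place (for example on pages whose evacuation was
// aborted) and their slots still need to be recorded.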
Ben Murdochc5610432016-08-08 18:44:38 +01001935class MarkCompactCollector::EvacuateRecordOnlyVisitor final
1936 : public MarkCompactCollector::HeapObjectVisitor {
1937 public:
Ben Murdoch61f157c2016-09-16 13:49:30 +01001938 explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
Ben Murdochc5610432016-08-08 18:44:38 +01001939
1940 inline bool Visit(HeapObject* object) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001941 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1942 object->IterateBody(&visitor);
Ben Murdochc5610432016-08-08 18:44:38 +01001943 return true;
1944 }
1945
1946 private:
Ben Murdoch61f157c2016-09-16 13:49:30 +01001947 Heap* heap_;
Ben Murdochc5610432016-08-08 18:44:38 +01001948};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001949
1950void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001951 for (Page* p : *space) {
Ben Murdochda12d292016-06-02 14:46:10 +01001952 if (!p->IsFlagSet(Page::BLACK_PAGE)) {
1953 DiscoverGreyObjectsOnPage(p);
1954 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001955 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001956 }
1957}
1958
1959
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001960void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1961 NewSpace* space = heap()->new_space();
Ben Murdoch61f157c2016-09-16 13:49:30 +01001962 for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001963 DiscoverGreyObjectsOnPage(page);
1964 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001965 }
1966}
1967
1968
1969bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1970 Object* o = *p;
1971 if (!o->IsHeapObject()) return false;
1972 HeapObject* heap_object = HeapObject::cast(o);
1973 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001974 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001975}
1976
1977
1978bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1979 Object** p) {
1980 Object* o = *p;
1981 DCHECK(o->IsHeapObject());
1982 HeapObject* heap_object = HeapObject::cast(o);
1983 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001984 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001985}
1986
1987
1988void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1989 StringTable* string_table = heap()->string_table();
1990 // Mark the string table itself.
1991 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001992 if (Marking::IsWhite(string_table_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001993    // The string table may already have been marked by visiting the handles list.
1994 SetMark(string_table, string_table_mark);
1995 }
1996 // Explicitly mark the prefix.
1997 string_table->IteratePrefix(visitor);
1998 ProcessMarkingDeque();
1999}
2000
2001
2002void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2003 MarkBit mark_bit = Marking::MarkBitFrom(site);
2004 SetMark(site, mark_bit);
2005}
2006
2007
2008void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2009 // Mark the heap roots including global variables, stack variables,
2010 // etc., and all objects reachable from them.
2011 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2012
2013 // Handle the string table specially.
2014 MarkStringTable(visitor);
2015
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002016 // There may be overflowed objects in the heap. Visit them now.
2017 while (marking_deque_.overflowed()) {
2018 RefillMarkingDeque();
2019 EmptyMarkingDeque();
2020 }
2021}
2022
2023
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002024void MarkCompactCollector::MarkImplicitRefGroups(
2025 MarkObjectFunction mark_object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002026 List<ImplicitRefGroup*>* ref_groups =
2027 isolate()->global_handles()->implicit_ref_groups();
2028
2029 int last = 0;
2030 for (int i = 0; i < ref_groups->length(); i++) {
2031 ImplicitRefGroup* entry = ref_groups->at(i);
2032 DCHECK(entry != NULL);
2033
2034 if (!IsMarked(*entry->parent)) {
2035 (*ref_groups)[last++] = entry;
2036 continue;
2037 }
2038
2039 Object*** children = entry->children;
2040 // A parent object is marked, so mark all child heap objects.
2041 for (size_t j = 0; j < entry->length; ++j) {
2042 if ((*children[j])->IsHeapObject()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002043 mark_object(heap(), HeapObject::cast(*children[j]));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002044 }
2045 }
2046
2047 // Once the entire group has been marked, dispose it because it's
2048 // not needed anymore.
2049 delete entry;
2050 }
2051 ref_groups->Rewind(last);
2052}
2053
2054
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002055// Mark all objects reachable from the objects on the marking stack.
2056// Before: the marking stack contains zero or more heap object pointers.
2057// After: the marking stack is empty, and all objects reachable from the
2058// marking stack have been marked, or are overflowed in the heap.
2059void MarkCompactCollector::EmptyMarkingDeque() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002060 Map* filler_map = heap_->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002061 while (!marking_deque_.IsEmpty()) {
2062 HeapObject* object = marking_deque_.Pop();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002063    // Explicitly skip one-word fillers. Incremental markbit patterns are
2064 // correct only for objects that occupy at least two words.
2065 Map* map = object->map();
2066 if (map == filler_map) continue;
2067
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002068 DCHECK(object->IsHeapObject());
2069 DCHECK(heap()->Contains(object));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002070 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002071
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002072 MarkBit map_mark = Marking::MarkBitFrom(map);
2073 MarkObject(map, map_mark);
2074
2075 MarkCompactMarkingVisitor::IterateBody(map, object);
2076 }
2077}
2078
2079
2080// Sweep the heap for overflowed objects, clear their overflow bits, and
2081// push them on the marking stack. Stop early if the marking stack fills
2082// before sweeping completes. If sweeping completes, there are no remaining
2083// overflowed objects in the heap, so the overflow flag on the marking stack
2084// is cleared.
2085void MarkCompactCollector::RefillMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002086 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002087 DCHECK(marking_deque_.overflowed());
2088
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002089 DiscoverGreyObjectsInNewSpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002090 if (marking_deque_.IsFull()) return;
2091
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002092 DiscoverGreyObjectsInSpace(heap()->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002093 if (marking_deque_.IsFull()) return;
2094
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002095 DiscoverGreyObjectsInSpace(heap()->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002096 if (marking_deque_.IsFull()) return;
2097
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002098 DiscoverGreyObjectsInSpace(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002099 if (marking_deque_.IsFull()) return;
2100
2101 LargeObjectIterator lo_it(heap()->lo_space());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002102 DiscoverGreyObjectsWithIterator(&lo_it);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002103 if (marking_deque_.IsFull()) return;
2104
2105 marking_deque_.ClearOverflowed();
2106}
2107
2108
2109// Mark all objects reachable (transitively) from objects on the marking
2110// stack. Before: the marking stack contains zero or more heap object
2111// pointers. After: the marking stack is empty and there are no overflowed
2112// objects in the heap.
2113void MarkCompactCollector::ProcessMarkingDeque() {
2114 EmptyMarkingDeque();
2115 while (marking_deque_.overflowed()) {
2116 RefillMarkingDeque();
2117 EmptyMarkingDeque();
2118 }
2119}
2120
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002121// Mark all objects reachable (transitively) from objects on the marking
2122// stack, including references only considered in the atomic marking pause.
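// This runs to a fixed point: processing weak collections or advancing the
// embedder tracer can push new objects onto the marking deque, so we iterate
// until no further work is discovered.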
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002123void MarkCompactCollector::ProcessEphemeralMarking(
2124 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002125 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
Ben Murdochc5610432016-08-08 18:44:38 +01002126 bool work_to_do = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002127 while (work_to_do) {
Ben Murdochc5610432016-08-08 18:44:38 +01002128 if (UsingEmbedderHeapTracer()) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002129 RegisterWrappersWithEmbedderHeapTracer();
2130 embedder_heap_tracer()->AdvanceTracing(
2131 0, EmbedderHeapTracer::AdvanceTracingActions(
2132 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
Ben Murdochc5610432016-08-08 18:44:38 +01002133 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002134 if (!only_process_harmony_weak_collections) {
2135 isolate()->global_handles()->IterateObjectGroups(
2136 visitor, &IsUnmarkedHeapObjectWithHeap);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002137 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002138 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139 ProcessWeakCollections();
2140 work_to_do = !marking_deque_.IsEmpty();
2141 ProcessMarkingDeque();
2142 }
2143}
2144
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002145void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2146 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2147 !it.done(); it.Advance()) {
2148 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2149 return;
2150 }
2151 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2152 Code* code = it.frame()->LookupCode();
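      // If the optimized code cannot be deoptimized at the current pc, it has
      // to stay alive, so treat everything it references as strongly
      // reachable.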
2153 if (!code->CanDeoptAt(it.frame()->pc())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002154 Code::BodyDescriptor::IterateBody(code, visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002155 }
2156 ProcessMarkingDeque();
2157 return;
2158 }
2159 }
2160}
2161
2162
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002163void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
2164 DCHECK(!marking_deque_.in_use());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002165 if (marking_deque_memory_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002166 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
2167 marking_deque_memory_committed_ = 0;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002168 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002169 if (marking_deque_memory_ == NULL) {
2170 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
2171 }
2172}
2173
2174
2175void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
2176 // If the marking deque is too small, we try to allocate a bigger one.
2177 // If that fails, make do with a smaller one.
2178 CHECK(!marking_deque_.in_use());
2179 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
2180 base::VirtualMemory* memory = marking_deque_memory_;
2181 size_t currently_committed = marking_deque_memory_committed_;
2182
2183 if (currently_committed == size) return;
2184
2185 if (currently_committed > size) {
2186 bool success = marking_deque_memory_->Uncommit(
2187 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
2188 currently_committed - size);
2189 if (success) {
2190 marking_deque_memory_committed_ = size;
2191 return;
2192 }
2193 UNREACHABLE();
2194 }
2195
2196 bool success = memory->Commit(
2197 reinterpret_cast<Address>(memory->address()) + currently_committed,
2198 size - currently_committed,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002199 false); // Not executable.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002200 if (success) {
2201 marking_deque_memory_committed_ = size;
2202 return;
2203 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002204 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002205 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002206}
2207
2208
2209void MarkCompactCollector::InitializeMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002210 DCHECK(!marking_deque_.in_use());
2211 DCHECK(marking_deque_memory_committed_ > 0);
2212 Address addr = static_cast<Address>(marking_deque_memory_->address());
2213 size_t size = marking_deque_memory_committed_;
2214 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2215 marking_deque_.Initialize(addr, addr + size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002216}
2217
2218
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002219void MarkingDeque::Initialize(Address low, Address high) {
2220 DCHECK(!in_use_);
2221 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2222 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2223 array_ = obj_low;
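  // Round the capacity down to a power of two so that deque indices can wrap
  // with a cheap bitwise AND against mask_.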
2224 mask_ = base::bits::RoundDownToPowerOfTwo32(
2225 static_cast<uint32_t>(obj_high - obj_low)) -
2226 1;
2227 top_ = bottom_ = 0;
2228 overflowed_ = false;
2229 in_use_ = true;
2230}
2231
2232
2233void MarkingDeque::Uninitialize(bool aborting) {
2234 if (!aborting) {
2235 DCHECK(IsEmpty());
2236 DCHECK(!overflowed_);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002237 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002238 DCHECK(in_use_);
2239 top_ = bottom_ = 0xdecbad;
2240 in_use_ = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002241}
2242
Ben Murdochc5610432016-08-08 18:44:38 +01002243void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
2244 DCHECK_NOT_NULL(tracer);
2245 CHECK_NULL(embedder_heap_tracer_);
2246 embedder_heap_tracer_ = tracer;
2247}
2248
Ben Murdoch61f157c2016-09-16 13:49:30 +01002249void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
2250 DCHECK(UsingEmbedderHeapTracer());
2251 if (wrappers_to_trace_.empty()) {
2252 return;
2253 }
2254 embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
2255 wrappers_to_trace_.clear();
2256}
2257
Ben Murdochc5610432016-08-08 18:44:38 +01002258void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
2259 DCHECK(js_object->WasConstructedFromApiFunction());
2260 if (js_object->GetInternalFieldCount() >= 2 &&
2261 js_object->GetInternalField(0) &&
2262 js_object->GetInternalField(0) != heap_->undefined_value() &&
2263 js_object->GetInternalField(1) != heap_->undefined_value()) {
2264 DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
Ben Murdoch61f157c2016-09-16 13:49:30 +01002265 wrappers_to_trace_.push_back(std::pair<void*, void*>(
Ben Murdochc5610432016-08-08 18:44:38 +01002266 reinterpret_cast<void*>(js_object->GetInternalField(0)),
2267 reinterpret_cast<void*>(js_object->GetInternalField(1))));
2268 }
2269}
2270
2271void MarkCompactCollector::RegisterExternallyReferencedObject(Object** object) {
2272 DCHECK(in_use());
2273 HeapObject* heap_object = HeapObject::cast(*object);
2274 DCHECK(heap_->Contains(heap_object));
2275 MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
2276 MarkObject(heap_object, mark_bit);
2277}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002278
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002279void MarkCompactCollector::MarkLiveObjects() {
Ben Murdochda12d292016-06-02 14:46:10 +01002280 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002281 double start_time = 0.0;
2282 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002283 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002284 }
2285 // The recursive GC marker detects when it is nearing stack overflow,
2286 // and switches to a different marking system. JS interrupts interfere
2287 // with the C stack limit check.
2288 PostponeInterruptsScope postpone(isolate());
2289
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002290 {
Ben Murdochda12d292016-06-02 14:46:10 +01002291 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002292 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2293 if (was_marked_incrementally_) {
2294 incremental_marking->Finalize();
2295 } else {
2296 // Abort any pending incremental activities e.g. incremental sweeping.
2297 incremental_marking->Stop();
Ben Murdoch61f157c2016-09-16 13:49:30 +01002298 if (FLAG_track_gc_object_stats) {
2299 // Clear object stats collected during incremental marking.
2300 heap()->object_stats_->ClearObjectStats();
2301 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002302 if (marking_deque_.in_use()) {
2303 marking_deque_.Uninitialize(true);
2304 }
2305 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002306 }
2307
2308#ifdef DEBUG
2309 DCHECK(state_ == PREPARE_GC);
2310 state_ = MARK_LIVE_OBJECTS;
2311#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002312
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002313 EnsureMarkingDequeIsCommittedAndInitialize(
2314 MarkCompactCollector::kMaxMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002315
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002316 {
Ben Murdochda12d292016-06-02 14:46:10 +01002317 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002318 PrepareForCodeFlushing();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002319 }
2320
2321 RootMarkingVisitor root_visitor(heap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002322
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002323 {
Ben Murdochda12d292016-06-02 14:46:10 +01002324 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002325 MarkRoots(&root_visitor);
2326 ProcessTopOptimizedFrame(&root_visitor);
2327 }
2328
2329 {
Ben Murdochda12d292016-06-02 14:46:10 +01002330 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002331
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002332 // The objects reachable from the roots are marked, yet unreachable
2333 // objects are unmarked. Mark objects reachable due to host
2334 // application specific logic or through Harmony weak maps.
Ben Murdochda12d292016-06-02 14:46:10 +01002335 {
2336 TRACE_GC(heap()->tracer(),
2337 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2338 ProcessEphemeralMarking(&root_visitor, false);
Ben Murdochda12d292016-06-02 14:46:10 +01002339 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002340
2341 // The objects reachable from the roots, weak maps or object groups
2342 // are marked. Objects pointed to only by weak global handles cannot be
2343 // immediately reclaimed. Instead, we have to mark them as pending and mark
2344 // objects reachable from them.
2345 //
2346 // First we identify nonlive weak handles and mark them as pending
2347 // destruction.
Ben Murdochda12d292016-06-02 14:46:10 +01002348 {
2349 TRACE_GC(heap()->tracer(),
2350 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2351 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2352 &IsUnmarkedHeapObject);
2353 ProcessMarkingDeque();
2354 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002355 // Then we mark the objects.
Ben Murdochda12d292016-06-02 14:46:10 +01002356
2357 {
2358 TRACE_GC(heap()->tracer(),
2359 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2360 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2361 ProcessMarkingDeque();
2362 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002363
2364 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2365 // the weak roots we just marked as pending destruction.
2366 //
2367 // We only process harmony collections, as all object groups have been fully
2368 // processed and no weakly reachable node can discover new objects groups.
Ben Murdochda12d292016-06-02 14:46:10 +01002369 {
2370 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2371 ProcessEphemeralMarking(&root_visitor, true);
Ben Murdochc5610432016-08-08 18:44:38 +01002372 if (UsingEmbedderHeapTracer()) {
2373 embedder_heap_tracer()->TraceEpilogue();
2374 }
Ben Murdochda12d292016-06-02 14:46:10 +01002375 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002376 }
2377
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002378 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002379 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2380 start_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002381 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002382 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002383 if (FLAG_trace_gc_object_stats) {
2384 heap()->object_stats_->TraceObjectStats();
2385 }
2386 heap()->object_stats_->CheckpointObjectStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002387 }
2388}
2389
2390
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002391void MarkCompactCollector::ClearNonLiveReferences() {
Ben Murdochda12d292016-06-02 14:46:10 +01002392 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002393
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002394 {
Ben Murdochda12d292016-06-02 14:46:10 +01002395 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002396
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002397    // Prune the string table, removing all strings only pointed to by the
2398 // string table. Cannot use string_table() here because the string
2399 // table is marked.
2400 StringTable* string_table = heap()->string_table();
Ben Murdochda12d292016-06-02 14:46:10 +01002401 InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002402 string_table->IterateElements(&internalized_visitor);
2403 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002404
Ben Murdochda12d292016-06-02 14:46:10 +01002405 ExternalStringTableCleaner external_visitor(heap(), nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002406 heap()->external_string_table_.Iterate(&external_visitor);
2407 heap()->external_string_table_.CleanUp();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002408 }
2409
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002410 {
Ben Murdochda12d292016-06-02 14:46:10 +01002411 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002412 // Process the weak references.
2413 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2414 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002415 }
2416
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002417 {
Ben Murdochda12d292016-06-02 14:46:10 +01002418 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002419
2420 // Remove object groups after marking phase.
2421 heap()->isolate()->global_handles()->RemoveObjectGroups();
2422 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002423 }
2424
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002425 // Flush code from collected candidates.
2426 if (is_code_flushing_enabled()) {
Ben Murdochda12d292016-06-02 14:46:10 +01002427 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002428 code_flusher_->ProcessCandidates();
2429 }
2430
2431
2432 DependentCode* dependent_code_list;
2433 Object* non_live_map_list;
2434 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2435
2436 {
Ben Murdochda12d292016-06-02 14:46:10 +01002437 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002438 ClearSimpleMapTransitions(non_live_map_list);
2439 ClearFullMapTransitions();
2440 }
2441
2442 MarkDependentCodeForDeoptimization(dependent_code_list);
2443
2444 ClearWeakCollections();
2445
Ben Murdochda12d292016-06-02 14:46:10 +01002446 ClearInvalidRememberedSetSlots();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002447}
2448
2449
2450void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2451 DependentCode* list_head) {
Ben Murdochda12d292016-06-02 14:46:10 +01002452 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002453 Isolate* isolate = this->isolate();
2454 DependentCode* current = list_head;
2455 while (current->length() > 0) {
2456 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2457 isolate, DependentCode::kWeakCodeGroup);
2458 current = current->next_link();
2459 }
2460
2461 WeakHashTable* table = heap_->weak_object_to_code_table();
2462 uint32_t capacity = table->Capacity();
2463 for (uint32_t i = 0; i < capacity; i++) {
2464 uint32_t key_index = table->EntryToIndex(i);
2465 Object* key = table->get(key_index);
Ben Murdoch61f157c2016-09-16 13:49:30 +01002466 if (!table->IsKey(isolate, key)) continue;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002467 uint32_t value_index = table->EntryToValueIndex(i);
2468 Object* value = table->get(value_index);
2469 DCHECK(key->IsWeakCell());
2470 if (WeakCell::cast(key)->cleared()) {
2471 have_code_to_deoptimize_ |=
2472 DependentCode::cast(value)->MarkCodeForDeoptimization(
2473 isolate, DependentCode::kWeakCodeGroup);
2474 table->set(key_index, heap_->the_hole_value());
2475 table->set(value_index, heap_->the_hole_value());
2476 table->ElementRemoved();
2477 }
2478 }
2479}
2480
2481
2482void MarkCompactCollector::ClearSimpleMapTransitions(
2483 Object* non_live_map_list) {
2484 Object* the_hole_value = heap()->the_hole_value();
2485 Object* weak_cell_obj = non_live_map_list;
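  // non_live_map_list is a singly-linked list of WeakCells threaded through
  // their next fields and terminated by Smi::FromInt(0).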
2486 while (weak_cell_obj != Smi::FromInt(0)) {
2487 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2488 Map* map = Map::cast(weak_cell->value());
2489 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2490 Object* potential_parent = map->constructor_or_backpointer();
2491 if (potential_parent->IsMap()) {
2492 Map* parent = Map::cast(potential_parent);
2493 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2494 parent->raw_transitions() == weak_cell) {
2495 ClearSimpleMapTransition(parent, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002496 }
2497 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002498 weak_cell->clear();
2499 weak_cell_obj = weak_cell->next();
2500 weak_cell->clear_next(the_hole_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002501 }
2502}
2503
2504
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002505void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2506 Map* dead_transition) {
2507 // A previously existing simple transition (stored in a WeakCell) is going
2508 // to be cleared. Clear the useless cell pointer, and take ownership
2509 // of the descriptor array.
2510 map->set_raw_transitions(Smi::FromInt(0));
2511 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512 DescriptorArray* descriptors = map->instance_descriptors();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002513 if (descriptors == dead_transition->instance_descriptors() &&
2514 number_of_own_descriptors > 0) {
2515 TrimDescriptorArray(map, descriptors);
2516 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2517 map->set_owns_descriptors(true);
2518 }
2519}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002520
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002521
2522void MarkCompactCollector::ClearFullMapTransitions() {
2523 HeapObject* undefined = heap()->undefined_value();
2524 Object* obj = heap()->encountered_transition_arrays();
2525 while (obj != Smi::FromInt(0)) {
2526 TransitionArray* array = TransitionArray::cast(obj);
2527 int num_transitions = array->number_of_entries();
2528 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2529 if (num_transitions > 0) {
2530 Map* map = array->GetTarget(0);
2531 Map* parent = Map::cast(map->constructor_or_backpointer());
2532 bool parent_is_alive =
2533 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2534 DescriptorArray* descriptors =
2535 parent_is_alive ? parent->instance_descriptors() : nullptr;
2536 bool descriptors_owner_died =
2537 CompactTransitionArray(parent, array, descriptors);
2538 if (descriptors_owner_died) {
2539 TrimDescriptorArray(parent, descriptors);
2540 }
2541 }
2542 obj = array->next_link();
2543 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2544 }
2545 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2546}
2547
2548
2549bool MarkCompactCollector::CompactTransitionArray(
2550 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2551 int num_transitions = transitions->number_of_entries();
2552 bool descriptors_owner_died = false;
2553 int transition_index = 0;
2554 // Compact all live transitions to the left.
2555 for (int i = 0; i < num_transitions; ++i) {
2556 Map* target = transitions->GetTarget(i);
2557 DCHECK_EQ(target->constructor_or_backpointer(), map);
2558 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2559 if (descriptors != nullptr &&
2560 target->instance_descriptors() == descriptors) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002561 descriptors_owner_died = true;
2562 }
2563 } else {
2564 if (i != transition_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002565 Name* key = transitions->GetKey(i);
2566 transitions->SetKey(transition_index, key);
2567 Object** key_slot = transitions->GetKeySlot(transition_index);
2568 RecordSlot(transitions, key_slot, key);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002569 // Target slots do not need to be recorded since maps are not compacted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002570 transitions->SetTarget(transition_index, transitions->GetTarget(i));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002571 }
2572 transition_index++;
2573 }
2574 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002575 // If there are no transitions to be cleared, return.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002576 if (transition_index == num_transitions) {
2577 DCHECK(!descriptors_owner_died);
2578 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002579 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002580 // Note that we never eliminate a transition array, though we might right-trim
2581 // such that number_of_transitions() == 0. If this assumption changes,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002582 // TransitionArray::Insert() will need to deal with the case that a transition
2583 // array disappeared during GC.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002584 int trim = TransitionArray::Capacity(transitions) - transition_index;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002585 if (trim > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002586 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2587 transitions, trim * TransitionArray::kTransitionSize);
2588 transitions->SetNumberOfTransitions(transition_index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002589 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002590 return descriptors_owner_died;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002591}
2592
2593
2594void MarkCompactCollector::TrimDescriptorArray(Map* map,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002595 DescriptorArray* descriptors) {
2596 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2597 if (number_of_own_descriptors == 0) {
2598 DCHECK(descriptors == heap_->empty_descriptor_array());
2599 return;
2600 }
2601
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002602 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2603 int to_trim = number_of_descriptors - number_of_own_descriptors;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002604 if (to_trim > 0) {
2605 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2606 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2607 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002608
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002609 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2610 descriptors->Sort();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002611
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002612 if (FLAG_unbox_double_fields) {
2613 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2614 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2615 number_of_own_descriptors);
2616 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2617 }
2618 }
2619 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2620 map->set_owns_descriptors(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002621}
2622
2623
2624void MarkCompactCollector::TrimEnumCache(Map* map,
2625 DescriptorArray* descriptors) {
2626 int live_enum = map->EnumLength();
2627 if (live_enum == kInvalidEnumCacheSentinel) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002628 live_enum =
2629 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002630 }
2631 if (live_enum == 0) return descriptors->ClearEnumCache();
2632
2633 FixedArray* enum_cache = descriptors->GetEnumCache();
2634
2635 int to_trim = enum_cache->length() - live_enum;
2636 if (to_trim <= 0) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002637 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2638 descriptors->GetEnumCache(), to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002639
2640 if (!descriptors->HasEnumIndicesCache()) return;
2641 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002642 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2643 to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002644}
2645
2646
2647void MarkCompactCollector::ProcessWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002648 Object* weak_collection_obj = heap()->encountered_weak_collections();
2649 while (weak_collection_obj != Smi::FromInt(0)) {
2650 JSWeakCollection* weak_collection =
2651 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2652 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2653 if (weak_collection->table()->IsHashTable()) {
2654 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002655 for (int i = 0; i < table->Capacity(); i++) {
2656 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2657 Object** key_slot =
2658 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002659 RecordSlot(table, key_slot, *key_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002660 Object** value_slot =
2661 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002662 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002663 value_slot);
2664 }
2665 }
2666 }
2667 weak_collection_obj = weak_collection->next();
2668 }
2669}
2670
2671
2672void MarkCompactCollector::ClearWeakCollections() {
Ben Murdochda12d292016-06-02 14:46:10 +01002673 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002674 Object* weak_collection_obj = heap()->encountered_weak_collections();
2675 while (weak_collection_obj != Smi::FromInt(0)) {
2676 JSWeakCollection* weak_collection =
2677 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2678 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2679 if (weak_collection->table()->IsHashTable()) {
2680 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2681 for (int i = 0; i < table->Capacity(); i++) {
2682 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2683 if (!MarkCompactCollector::IsMarked(key)) {
2684 table->RemoveEntry(i);
2685 }
2686 }
2687 }
2688 weak_collection_obj = weak_collection->next();
2689 weak_collection->set_next(heap()->undefined_value());
2690 }
2691 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2692}
2693
2694
2695void MarkCompactCollector::AbortWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002696 Object* weak_collection_obj = heap()->encountered_weak_collections();
2697 while (weak_collection_obj != Smi::FromInt(0)) {
2698 JSWeakCollection* weak_collection =
2699 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2700 weak_collection_obj = weak_collection->next();
2701 weak_collection->set_next(heap()->undefined_value());
2702 }
2703 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2704}
2705
2706
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002707void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2708 DependentCode** dependent_code_list) {
2709 Heap* heap = this->heap();
Ben Murdochda12d292016-06-02 14:46:10 +01002710 TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002711 Object* weak_cell_obj = heap->encountered_weak_cells();
2712 Object* the_hole_value = heap->the_hole_value();
2713 DependentCode* dependent_code_head =
2714 DependentCode::cast(heap->empty_fixed_array());
2715 Object* non_live_map_head = Smi::FromInt(0);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002716 while (weak_cell_obj != Smi::FromInt(0)) {
2717 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002718 Object* next_weak_cell = weak_cell->next();
2719 bool clear_value = true;
2720 bool clear_next = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002721 // We do not insert cleared weak cells into the list, so the value
2722 // cannot be a Smi here.
2723 HeapObject* value = HeapObject::cast(weak_cell->value());
2724 if (!MarkCompactCollector::IsMarked(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002725 // Cells for new-space objects embedded in optimized code are wrapped in
2726 // WeakCell and put into Heap::weak_object_to_code_table.
2727 // Such cells do not have any strong references but we want to keep them
2728 // alive as long as the cell value is alive.
2729 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2730 if (value->IsCell()) {
2731 Object* cell_value = Cell::cast(value)->value();
2732 if (cell_value->IsHeapObject() &&
2733 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2734 // Resurrect the cell.
2735 MarkBit mark = Marking::MarkBitFrom(value);
2736 SetMark(value, mark);
2737 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2738 RecordSlot(value, slot, *slot);
2739 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2740 RecordSlot(weak_cell, slot, *slot);
2741 clear_value = false;
2742 }
2743 }
2744 if (value->IsMap()) {
2745 // The map is non-live.
2746 Map* map = Map::cast(value);
2747 // Add dependent code to the dependent_code_list.
2748 DependentCode* candidate = map->dependent_code();
2749 // We rely on the fact that the weak code group comes first.
2750 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2751 if (candidate->length() > 0 &&
2752 candidate->group() == DependentCode::kWeakCodeGroup) {
2753 candidate->set_next_link(dependent_code_head);
2754 dependent_code_head = candidate;
2755 }
2756 // Add the weak cell to the non_live_map list.
2757 weak_cell->set_next(non_live_map_head);
2758 non_live_map_head = weak_cell;
2759 clear_value = false;
2760 clear_next = false;
2761 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002762 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002763 // The value of the weak cell is alive.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002764 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002765 RecordSlot(weak_cell, slot, *slot);
2766 clear_value = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002767 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002768 if (clear_value) {
2769 weak_cell->clear();
2770 }
2771 if (clear_next) {
2772 weak_cell->clear_next(the_hole_value);
2773 }
2774 weak_cell_obj = next_weak_cell;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002775 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002776 heap->set_encountered_weak_cells(Smi::FromInt(0));
2777 *non_live_map_list = non_live_map_head;
2778 *dependent_code_list = dependent_code_head;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002779}
2780
2781
2782void MarkCompactCollector::AbortWeakCells() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002783 Object* the_hole_value = heap()->the_hole_value();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002784 Object* weak_cell_obj = heap()->encountered_weak_cells();
2785 while (weak_cell_obj != Smi::FromInt(0)) {
2786 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2787 weak_cell_obj = weak_cell->next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002788 weak_cell->clear_next(the_hole_value);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002789 }
2790 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2791}
2792
2793
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002794void MarkCompactCollector::AbortTransitionArrays() {
2795 HeapObject* undefined = heap()->undefined_value();
2796 Object* obj = heap()->encountered_transition_arrays();
2797 while (obj != Smi::FromInt(0)) {
2798 TransitionArray* array = TransitionArray::cast(obj);
2799 obj = array->next_link();
2800 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2801 }
2802 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2803}
2804
Ben Murdochda12d292016-06-02 14:46:10 +01002805static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002806 if (RelocInfo::IsCodeTarget(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002807 return CODE_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002808 } else if (RelocInfo::IsCell(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002809 return CELL_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002810 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002811 return EMBEDDED_OBJECT_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002812 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002813 return DEBUG_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002814 }
2815 UNREACHABLE();
Ben Murdochda12d292016-06-02 14:46:10 +01002816 return NUMBER_OF_SLOT_TYPES;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002817}
2818
Ben Murdochda12d292016-06-02 14:46:10 +01002819void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2820 Object* target) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002821 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Ben Murdochda12d292016-06-02 14:46:10 +01002822 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002823 RelocInfo::Mode rmode = rinfo->rmode();
2824 if (target_page->IsEvacuationCandidate() &&
2825 (rinfo->host() == NULL ||
2826 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2827 Address addr = rinfo->pc();
Ben Murdochda12d292016-06-02 14:46:10 +01002828 SlotType slot_type = SlotTypeForRMode(rmode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002829 if (rinfo->IsInConstantPool()) {
2830 addr = rinfo->constant_pool_entry_address();
2831 if (RelocInfo::IsCodeTarget(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002832 slot_type = CODE_ENTRY_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002833 } else {
2834 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
Ben Murdochda12d292016-06-02 14:46:10 +01002835 slot_type = OBJECT_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002836 }
2837 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01002838 RememberedSet<OLD_TO_OLD>::InsertTyped(
2839 source_page, reinterpret_cast<Address>(host), slot_type, addr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002840 }
2841}
2842
Ben Murdoch61f157c2016-09-16 13:49:30 +01002843static inline SlotCallbackResult UpdateSlot(Object** slot) {
2844 Object* obj = reinterpret_cast<Object*>(
2845 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002846
Ben Murdoch61f157c2016-09-16 13:49:30 +01002847 if (obj->IsHeapObject()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002848 HeapObject* heap_obj = HeapObject::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002849 MapWord map_word = heap_obj->map_word();
2850 if (map_word.IsForwardingAddress()) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002851 DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002852 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2853 Page::FromAddress(heap_obj->address())
2854 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002855 HeapObject* target = map_word.ToForwardingAddress();
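      // Install the forwarding address with a compare-and-swap so that
      // concurrent updaters racing on the same slot cannot overwrite each
      // other; if another thread already wrote the new address, the swap
      // simply has no effect.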
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002856 base::NoBarrier_CompareAndSwap(
2857 reinterpret_cast<base::AtomicWord*>(slot),
2858 reinterpret_cast<base::AtomicWord>(obj),
2859 reinterpret_cast<base::AtomicWord>(target));
Ben Murdoch61f157c2016-09-16 13:49:30 +01002860 DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002861 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2862 }
2863 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01002864 return REMOVE_SLOT;
2865}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002866
Ben Murdoch61f157c2016-09-16 13:49:30 +01002867// Visitor for updating pointers from live objects in old spaces to new space.
2868// It does not expect to encounter pointers to dead objects.
2869class PointersUpdatingVisitor : public ObjectVisitor {
2870 public:
2871 void VisitPointer(Object** p) override { UpdateSlot(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002872
Ben Murdoch61f157c2016-09-16 13:49:30 +01002873 void VisitPointers(Object** start, Object** end) override {
2874 for (Object** p = start; p < end; p++) UpdateSlot(p);
2875 }
2876
2877 void VisitCell(RelocInfo* rinfo) override {
2878 UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
2879 }
2880
2881 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
2882 UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
2883 }
2884
2885 void VisitCodeTarget(RelocInfo* rinfo) override {
2886 UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
2887 }
2888
2889 void VisitCodeEntry(Address entry_address) override {
2890 UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
2891 }
2892
2893 void VisitDebugTarget(RelocInfo* rinfo) override {
2894 UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
2895 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002896};
2897
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002898static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2899 Object** p) {
2900 MapWord map_word = HeapObject::cast(*p)->map_word();
2901
2902 if (map_word.IsForwardingAddress()) {
2903 return String::cast(map_word.ToForwardingAddress());
2904 }
2905
2906 return String::cast(*p);
2907}
2908
Ben Murdochda12d292016-06-02 14:46:10 +01002909bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002910 Space* owner = p->owner();
Ben Murdochda12d292016-06-02 14:46:10 +01002911 DCHECK(owner != heap_->lo_space() && owner != nullptr);
2912 USE(owner);
2913
 2914 // If we are on a black page, we cannot find the actual object start
 2915 // easily, so we conservatively report the slot as inside a live object.
2916 if (p->IsFlagSet(Page::BLACK_PAGE)) {
2917 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002918 }
2919
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002920 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2921 unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2922 MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2923 MarkBit::CellType* cells = p->markbits()->cells();
2924 Address base_address = p->area_start();
2925 unsigned int base_address_cell_index = Bitmap::IndexToCell(
2926 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
2927
2928 // Check if the slot points to the start of an object. This can happen e.g.
2929 // when we left trim a fixed array. Such slots are invalid and we can remove
2930 // them.
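  // The check below treats a set slot bit with a clear preceding bit as an
  // object start; when the slot's bit is the lowest bit of its cell, that
  // preceding bit is the highest bit of the previous cell.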
2931 if (index_mask > 1) {
2932 if ((cells[cell_index] & index_mask) != 0 &&
2933 (cells[cell_index] & (index_mask >> 1)) == 0) {
2934 return false;
2935 }
2936 } else {
2937 // Left trimming moves the mark bits so we cannot be in the very first cell.
2938 DCHECK(cell_index != base_address_cell_index);
2939 if ((cells[cell_index] & index_mask) != 0 &&
2940 (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2941 return false;
2942 }
2943 }
2944
2945 // Check if the object is in the current cell.
2946 MarkBit::CellType slot_mask;
2947 if ((cells[cell_index] == 0) ||
2948 (base::bits::CountTrailingZeros32(cells[cell_index]) >
2949 base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2950 // If we are already in the first cell, there is no live object.
2951 if (cell_index == base_address_cell_index) return false;
2952
2953 // If not, find a cell in a preceding cell slot that has a mark bit set.
2954 do {
2955 cell_index--;
2956 } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2957
2958 // The slot must be in a dead object if there are no preceding cells that
2959 // have mark bits set.
2960 if (cells[cell_index] == 0) {
2961 return false;
2962 }
2963
2964 // The object is in a preceding cell. Set the mask to find any object.
2965 slot_mask = ~0u;
2966 } else {
2967 // We are interested in object mark bits right before the slot.
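    // index_mask is a single bit; adding (index_mask - 1) also sets every
    // lower bit, e.g. 0x08 becomes 0x0f, so the mask covers the slot's bit
    // and all mark bits below it.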
2968 slot_mask = index_mask + (index_mask - 1);
2969 }
2970
2971 MarkBit::CellType current_cell = cells[cell_index];
2972 CHECK(current_cell != 0);
2973
2974 // Find the last live object in the cell.
2975 unsigned int leading_zeros =
2976 base::bits::CountLeadingZeros32(current_cell & slot_mask);
2977 CHECK(leading_zeros != Bitmap::kBitsPerCell);
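  // The object start is taken to be one word below the highest set mark bit:
  // black objects set two consecutive mark bits ("11" pattern), so that
  // highest bit is expected to be the second bit of the preceding object.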
2978 int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2979
2980 base_address += (cell_index - base_address_cell_index) *
2981 Bitmap::kBitsPerCell * kPointerSize;
2982 Address address = base_address + offset * kPointerSize;
2983 HeapObject* object = HeapObject::FromAddress(address);
2984 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2985 CHECK(object->address() < reinterpret_cast<Address>(slot));
2986 if ((object->address() + kPointerSize) <= slot &&
2987 (object->address() + object->Size()) > slot) {
2988 // If the slot is within the last found object in the cell, the slot is
2989 // in a live object.
2990 // Slots pointing to the first word of an object are invalid and removed.
2991 // This can happen when we move the object header while left trimming.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002992 return true;
2993 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002994 return false;
2995}
2996
Ben Murdochda12d292016-06-02 14:46:10 +01002997HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
2998 Page* p = Page::FromAddress(slot);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002999 Space* owner = p->owner();
Ben Murdochda12d292016-06-02 14:46:10 +01003000 if (owner == heap_->lo_space() || owner == nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003001 Object* large_object = heap_->lo_space()->FindObject(slot);
3002 // This object has to exist, otherwise we would not have recorded a slot
3003 // for it.
3004 CHECK(large_object->IsHeapObject());
3005 HeapObject* large_heap_object = HeapObject::cast(large_object);
Ben Murdochda12d292016-06-02 14:46:10 +01003006
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003007 if (IsMarked(large_heap_object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01003008 return large_heap_object;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003009 }
Ben Murdochda12d292016-06-02 14:46:10 +01003010 return nullptr;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003011 }
3012
Ben Murdochda12d292016-06-02 14:46:10 +01003013 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3014 HeapObjectIterator it(p);
3015 HeapObject* object = nullptr;
3016 while ((object = it.Next()) != nullptr) {
3017 int size = object->Size();
3018 if (object->address() > slot) return nullptr;
3019 if (object->address() <= slot && slot < (object->address() + size)) {
3020 return object;
3021 }
3022 }
3023 } else {
3024 LiveObjectIterator<kBlackObjects> it(p);
3025 HeapObject* object = nullptr;
3026 while ((object = it.Next()) != nullptr) {
3027 int size = object->Size();
3028 if (object->address() > slot) return nullptr;
3029 if (object->address() <= slot && slot < (object->address() + size)) {
3030 return object;
3031 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003032 }
3033 }
Ben Murdochda12d292016-06-02 14:46:10 +01003034 return nullptr;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003035}
3036
3037
3038void MarkCompactCollector::EvacuateNewSpacePrologue() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003039 NewSpace* new_space = heap()->new_space();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003040 // Append the list of new space pages to be processed.
Ben Murdoch61f157c2016-09-16 13:49:30 +01003041 for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
3042 newspace_evacuation_candidates_.Add(p);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003043 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003044 new_space->Flip();
3045 new_space->ResetAllocationInfo();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003046}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003047
Ben Murdoch097c5b22016-05-18 11:27:45 +01003048class MarkCompactCollector::Evacuator : public Malloced {
3049 public:
Ben Murdoch61f157c2016-09-16 13:49:30 +01003050 enum EvacuationMode {
3051 kObjectsNewToOld,
3052 kPageNewToOld,
3053 kObjectsOldToOld,
3054 kPageNewToNew,
3055 };
3056
3057 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3058 // Note: The order of checks is important in this function.
3059 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3060 return kPageNewToOld;
3061 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3062 return kPageNewToNew;
3063 if (chunk->InNewSpace()) return kObjectsNewToOld;
3064 DCHECK(chunk->IsEvacuationCandidate());
3065 return kObjectsOldToOld;
3066 }
3067
Ben Murdochc5610432016-08-08 18:44:38 +01003068 // NewSpacePages with more live bytes than this threshold qualify for fast
3069 // evacuation.
3070 static int PageEvacuationThreshold() {
3071 if (FLAG_page_promotion)
3072 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
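    // With page promotion disabled, return a threshold no page can reach, so
    // that no new space page is ever evacuated wholesale.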
3073 return Page::kAllocatableMemory + kPointerSize;
3074 }
3075
Ben Murdochda12d292016-06-02 14:46:10 +01003076 explicit Evacuator(MarkCompactCollector* collector)
Ben Murdoch097c5b22016-05-18 11:27:45 +01003077 : collector_(collector),
Ben Murdoch097c5b22016-05-18 11:27:45 +01003078 compaction_spaces_(collector->heap()),
Ben Murdoch61f157c2016-09-16 13:49:30 +01003079 local_pretenuring_feedback_(base::HashMap::PointersMatch,
Ben Murdoch097c5b22016-05-18 11:27:45 +01003080 kInitialLocalPretenuringFeedbackCapacity),
3081 new_space_visitor_(collector->heap(), &compaction_spaces_,
Ben Murdoch097c5b22016-05-18 11:27:45 +01003082 &local_pretenuring_feedback_),
Ben Murdoch61f157c2016-09-16 13:49:30 +01003083 new_space_page_visitor(collector->heap()),
Ben Murdochda12d292016-06-02 14:46:10 +01003084 old_space_visitor_(collector->heap(), &compaction_spaces_),
Ben Murdoch097c5b22016-05-18 11:27:45 +01003085 duration_(0.0),
Ben Murdochda12d292016-06-02 14:46:10 +01003086 bytes_compacted_(0) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003087
Ben Murdochc5610432016-08-08 18:44:38 +01003088 inline bool EvacuatePage(Page* chunk);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003089
3090 // Merge back locally cached info sequentially. Note that this method needs
3091 // to be called from the main thread.
3092 inline void Finalize();
3093
3094 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3095
Ben Murdoch097c5b22016-05-18 11:27:45 +01003096 private:
3097 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3098
Ben Murdochc5610432016-08-08 18:44:38 +01003099 inline Heap* heap() { return collector_->heap(); }
3100
Ben Murdoch097c5b22016-05-18 11:27:45 +01003101 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3102 duration_ += duration;
3103 bytes_compacted_ += bytes_compacted;
3104 }
3105
Ben Murdoch097c5b22016-05-18 11:27:45 +01003106 MarkCompactCollector* collector_;
3107
Ben Murdoch097c5b22016-05-18 11:27:45 +01003108 // Locally cached collector data.
3109 CompactionSpaceCollection compaction_spaces_;
Ben Murdoch61f157c2016-09-16 13:49:30 +01003110 base::HashMap local_pretenuring_feedback_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003111
Ben Murdochda12d292016-06-02 14:46:10 +01003112 // Visitors for the corresponding spaces.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003113 EvacuateNewSpaceVisitor new_space_visitor_;
Ben Murdochc5610432016-08-08 18:44:38 +01003114 EvacuateNewSpacePageVisitor new_space_page_visitor;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003115 EvacuateOldSpaceVisitor old_space_visitor_;
3116
 3117 // Bookkeeping info.
3118 double duration_;
3119 intptr_t bytes_compacted_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003120};
3121
Ben Murdoch61f157c2016-09-16 13:49:30 +01003122bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
Ben Murdochda12d292016-06-02 14:46:10 +01003123 bool success = false;
Ben Murdoch61f157c2016-09-16 13:49:30 +01003124 DCHECK(page->SweepingDone());
3125 int saved_live_bytes = page->LiveBytes();
3126 double evacuation_time = 0.0;
3127 Heap* heap = page->heap();
Ben Murdochda12d292016-06-02 14:46:10 +01003128 {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003129 AlwaysAllocateScope always_allocate(heap->isolate());
Ben Murdochda12d292016-06-02 14:46:10 +01003130 TimedScope timed_scope(&evacuation_time);
Ben Murdoch61f157c2016-09-16 13:49:30 +01003131 switch (ComputeEvacuationMode(page)) {
3132 case kObjectsNewToOld:
3133 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3134 kClearMarkbits);
3135 ArrayBufferTracker::ProcessBuffers(
3136 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3137 DCHECK(success);
3138 break;
3139 case kPageNewToOld:
3140 success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
3141 kKeepMarking);
3142 // ArrayBufferTracker will be updated during sweeping.
3143 DCHECK(success);
3144 break;
3145 case kPageNewToNew:
3146 new_space_page_visitor.account_semispace_copied(page->LiveBytes());
3147 // ArrayBufferTracker will be updated during sweeping.
3148 success = true;
3149 break;
3150 case kObjectsOldToOld:
3151 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3152 kClearMarkbits);
3153 if (!success) {
3154 // Aborted compaction page. We have to record slots here, since we
 3155 // might not have recorded them in the first place.
3156 // Note: We mark the page as aborted here to be able to record slots
3157 // for code objects in |RecordMigratedSlotVisitor|.
3158 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3159 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3160 success =
3161 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3162 ArrayBufferTracker::ProcessBuffers(
3163 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3164 DCHECK(success);
3165 // We need to return failure here to indicate that we want this page
3166 // added to the sweeper.
3167 success = false;
3168 } else {
3169 ArrayBufferTracker::ProcessBuffers(
3170 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3171 }
3172 break;
3173 default:
3174 UNREACHABLE();
3175 }
Ben Murdochda12d292016-06-02 14:46:10 +01003176 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01003177 ReportCompactionProgress(evacuation_time, saved_live_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01003178 if (FLAG_trace_evacuation) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003179 PrintIsolate(heap->isolate(),
3180 "evacuation[%p]: page=%p new_space=%d "
3181 "page_evacuation=%d executable=%d contains_age_mark=%d "
3182 "live_bytes=%d time=%f\n",
3183 static_cast<void*>(this), static_cast<void*>(page),
3184 page->InNewSpace(),
3185 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3186 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3187 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3188 page->Contains(heap->new_space()->age_mark()),
3189 saved_live_bytes, evacuation_time);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003190 }
3191 return success;
3192}
3193
Ben Murdoch097c5b22016-05-18 11:27:45 +01003194void MarkCompactCollector::Evacuator::Finalize() {
3195 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3196 heap()->code_space()->MergeCompactionSpace(
3197 compaction_spaces_.Get(CODE_SPACE));
3198 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
Ben Murdochc5610432016-08-08 18:44:38 +01003199 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3200 new_space_page_visitor.promoted_size());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003201 heap()->IncrementSemiSpaceCopiedObjectSize(
Ben Murdoch61f157c2016-09-16 13:49:30 +01003202 new_space_visitor_.semispace_copied_size() +
3203 new_space_page_visitor.semispace_copied_size());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003204 heap()->IncrementYoungSurvivorsCounter(
3205 new_space_visitor_.promoted_size() +
Ben Murdochc5610432016-08-08 18:44:38 +01003206 new_space_visitor_.semispace_copied_size() +
Ben Murdoch61f157c2016-09-16 13:49:30 +01003207 new_space_page_visitor.promoted_size() +
3208 new_space_page_visitor.semispace_copied_size());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003209 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003210}
3211
Ben Murdoch097c5b22016-05-18 11:27:45 +01003212int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3213 intptr_t live_bytes) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003214 if (!FLAG_parallel_compaction) return 1;
3215 // Compute the number of needed tasks based on a target compaction time, the
3216 // profiled compaction speed and marked live memory.
3217 //
3218 // The number of parallel compaction tasks is limited by:
3219 // - #evacuation pages
3220 // - (#cores - 1)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003221 const double kTargetCompactionTimeInMs = 1;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003222 const int kNumSweepingTasks = 3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003223
Ben Murdochda12d292016-06-02 14:46:10 +01003224 double compaction_speed =
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003225 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003226
Ben Murdochda12d292016-06-02 14:46:10 +01003227 const int available_cores = Max(
3228 1, static_cast<int>(
3229 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
3230 kNumSweepingTasks - 1);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003231 int tasks;
3232 if (compaction_speed > 0) {
Ben Murdochda12d292016-06-02 14:46:10 +01003233 tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
3234 kTargetCompactionTimeInMs);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003235 } else {
3236 tasks = pages;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003237 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003238 const int tasks_capped_pages = Min(pages, tasks);
3239 return Min(available_cores, tasks_capped_pages);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003240}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003241
Ben Murdochda12d292016-06-02 14:46:10 +01003242class EvacuationJobTraits {
3243 public:
3244 typedef int* PerPageData; // Pointer to number of aborted pages.
3245 typedef MarkCompactCollector::Evacuator* PerTaskData;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003246
Ben Murdochda12d292016-06-02 14:46:10 +01003247 static const bool NeedSequentialFinalization = true;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003248
Ben Murdochda12d292016-06-02 14:46:10 +01003249 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3250 MemoryChunk* chunk, PerPageData) {
Ben Murdochc5610432016-08-08 18:44:38 +01003251 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003252 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003253
Ben Murdochc5610432016-08-08 18:44:38 +01003254 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3255 bool success, PerPageData data) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003256 using Evacuator = MarkCompactCollector::Evacuator;
3257 Page* p = static_cast<Page*>(chunk);
3258 switch (Evacuator::ComputeEvacuationMode(p)) {
3259 case Evacuator::kPageNewToOld:
3260 break;
3261 case Evacuator::kPageNewToNew:
3262 DCHECK(success);
3263 break;
3264 case Evacuator::kObjectsNewToOld:
3265 DCHECK(success);
3266 break;
3267 case Evacuator::kObjectsOldToOld:
3268 if (success) {
3269 DCHECK(p->IsEvacuationCandidate());
3270 DCHECK(p->SweepingDone());
3271 p->Unlink();
3272 } else {
3273 // We have partially compacted the page, i.e., some objects may have
3274 // moved, others are still in place.
3275 p->ClearEvacuationCandidate();
3276 // Slots have already been recorded so we just need to add it to the
3277 // sweeper, which will happen after updating pointers.
3278 *data += 1;
3279 }
3280 break;
3281 default:
3282 UNREACHABLE();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003283 }
3284 }
Ben Murdochda12d292016-06-02 14:46:10 +01003285};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003286
Ben Murdochda12d292016-06-02 14:46:10 +01003287void MarkCompactCollector::EvacuatePagesInParallel() {
3288 PageParallelJob<EvacuationJobTraits> job(
Ben Murdochc5610432016-08-08 18:44:38 +01003289 heap_, heap_->isolate()->cancelable_task_manager(),
3290 &page_parallel_job_semaphore_);
Ben Murdochda12d292016-06-02 14:46:10 +01003291
3292 int abandoned_pages = 0;
3293 intptr_t live_bytes = 0;
3294 for (Page* page : evacuation_candidates_) {
3295 live_bytes += page->LiveBytes();
3296 job.AddPage(page, &abandoned_pages);
3297 }
Ben Murdochc5610432016-08-08 18:44:38 +01003298
3299 const Address age_mark = heap()->new_space()->age_mark();
3300 for (Page* page : newspace_evacuation_candidates_) {
Ben Murdochda12d292016-06-02 14:46:10 +01003301 live_bytes += page->LiveBytes();
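    // New space pages with enough live bytes are promoted wholesale: pages
    // below the age mark move into old space, the remaining ones are flipped
    // into the new to-space instead of being evacuated object by object.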
Ben Murdochc5610432016-08-08 18:44:38 +01003302 if (!page->NeverEvacuate() &&
3303 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
Ben Murdochc5610432016-08-08 18:44:38 +01003304 !page->Contains(age_mark)) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003305 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3306 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
3307 } else {
3308 EvacuateNewSpacePageVisitor::MoveToToSpace(page);
3309 }
Ben Murdochc5610432016-08-08 18:44:38 +01003310 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01003311
Ben Murdochda12d292016-06-02 14:46:10 +01003312 job.AddPage(page, &abandoned_pages);
3313 }
3314 DCHECK_GE(job.NumberOfPages(), 1);
3315
3316 // Used for trace summary.
3317 double compaction_speed = 0;
3318 if (FLAG_trace_evacuation) {
3319 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3320 }
3321
3322 const int wanted_num_tasks =
3323 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
3324 Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
3325 for (int i = 0; i < wanted_num_tasks; i++) {
3326 evacuators[i] = new Evacuator(this);
3327 }
3328 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
3329 for (int i = 0; i < wanted_num_tasks; i++) {
3330 evacuators[i]->Finalize();
3331 delete evacuators[i];
3332 }
3333 delete[] evacuators;
3334
3335 if (FLAG_trace_evacuation) {
Ben Murdochc5610432016-08-08 18:44:38 +01003336 PrintIsolate(isolate(),
3337 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
3338 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3339 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3340 isolate()->time_millis_since_init(),
3341 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
3342 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
3343 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3344 live_bytes, compaction_speed);
Ben Murdochda12d292016-06-02 14:46:10 +01003345 }
3346}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003347
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003348class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3349 public:
3350 virtual Object* RetainAs(Object* object) {
3351 if (object->IsHeapObject()) {
3352 HeapObject* heap_object = HeapObject::cast(object);
3353 MapWord map_word = heap_object->map_word();
3354 if (map_word.IsForwardingAddress()) {
3355 return map_word.ToForwardingAddress();
3356 }
3357 }
3358 return object;
3359 }
3360};
3361
Ben Murdochc5610432016-08-08 18:44:38 +01003362template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
3363 MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
3364 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
Ben Murdoch61f157c2016-09-16 13:49:30 +01003365 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
Ben Murdochc5610432016-08-08 18:44:38 +01003366 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
3367int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3368 ObjectVisitor* v) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003369 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
Ben Murdochda12d292016-06-02 14:46:10 +01003370 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
Ben Murdoch61f157c2016-09-16 13:49:30 +01003371 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
3372 (skip_list_mode == REBUILD_SKIP_LIST));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003373 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
Ben Murdochc5610432016-08-08 18:44:38 +01003374 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003375
Ben Murdoch61f157c2016-09-16 13:49:30 +01003376 // Before we sweep objects on the page, we free dead array buffers, which
3377 // requires valid mark bits.
3378 ArrayBufferTracker::FreeDead(p);
3379
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003380 Address free_start = p->area_start();
3381 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003382
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003383 // If we use the skip list for code space pages, we have to lock the skip
3384 // list because it could be accessed concurrently by the runtime or the
3385 // deoptimizer.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003386 SkipList* skip_list = p->skip_list();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003387 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3388 skip_list->Clear();
3389 }
3390
3391 intptr_t freed_bytes = 0;
3392 intptr_t max_freed_bytes = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003393 int curr_region = -1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003394
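  // Walk all black (live) objects on the page. The gap between the previous
  // free_start and the next live object either becomes a free list entry
  // (REBUILD_FREE_LIST) or is overwritten with a filler object
  // (IGNORE_FREE_LIST); live objects are optionally visited and added to the
  // skip list.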
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003395 LiveObjectIterator<kBlackObjects> it(p);
3396 HeapObject* object = NULL;
3397 while ((object = it.Next()) != NULL) {
3398 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3399 Address free_end = object->address();
3400 if (free_end != free_start) {
3401 int size = static_cast<int>(free_end - free_start);
3402 if (free_space_mode == ZAP_FREE_SPACE) {
3403 memset(free_start, 0xcc, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003404 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01003405 if (free_list_mode == REBUILD_FREE_LIST) {
3406 freed_bytes = space->UnaccountedFree(free_start, size);
3407 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3408 } else {
3409 p->heap()->CreateFillerObjectAt(free_start, size,
3410 ClearRecordedSlots::kNo);
3411 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003412 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003413 Map* map = object->synchronized_map();
3414 int size = object->SizeFromMap(map);
3415 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3416 object->IterateBody(map->instance_type(), size, v);
3417 }
3418 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3419 int new_region_start = SkipList::RegionNumber(free_end);
3420 int new_region_end =
3421 SkipList::RegionNumber(free_end + size - kPointerSize);
3422 if (new_region_start != curr_region || new_region_end != curr_region) {
3423 skip_list->AddObject(free_end, size);
3424 curr_region = new_region_end;
3425 }
3426 }
3427 free_start = free_end + size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003428 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003429
3430 // Clear the mark bits of that page and reset live bytes count.
3431 Bitmap::Clear(p);
3432
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003433 if (free_start != p->area_end()) {
3434 int size = static_cast<int>(p->area_end() - free_start);
3435 if (free_space_mode == ZAP_FREE_SPACE) {
3436 memset(free_start, 0xcc, size);
3437 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01003438 if (free_list_mode == REBUILD_FREE_LIST) {
3439 freed_bytes = space->UnaccountedFree(free_start, size);
3440 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3441 } else {
3442 p->heap()->CreateFillerObjectAt(free_start, size,
3443 ClearRecordedSlots::kNo);
3444 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003445 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003446 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
Ben Murdoch61f157c2016-09-16 13:49:30 +01003447 if (free_list_mode == IGNORE_FREE_LIST) return 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003448 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3449}
3450
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003451void MarkCompactCollector::InvalidateCode(Code* code) {
3452 if (heap_->incremental_marking()->IsCompacting() &&
3453 !ShouldSkipEvacuationSlotRecording(code)) {
3454 DCHECK(compacting_);
3455
 3456 // If the object is white then no slots were recorded on it yet.
3457 MarkBit mark_bit = Marking::MarkBitFrom(code);
3458 if (Marking::IsWhite(mark_bit)) return;
3459
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003460 // Ignore all slots that might have been recorded in the body of the
3461 // deoptimized code object. Assumption: no slots will be recorded for
3462 // this object after invalidating it.
Ben Murdochda12d292016-06-02 14:46:10 +01003463 Page* page = Page::FromAddress(code->address());
3464 Address start = code->instruction_start();
3465 Address end = code->address() + code->Size();
3466 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
Ben Murdoch61f157c2016-09-16 13:49:30 +01003467 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003468 }
3469}
3470
3471
3472// Return true if the given code is deoptimized or will be deoptimized.
3473bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3474 return code->is_optimized_code() && code->marked_for_deoptimization();
3475}
3476
3477
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003478#ifdef VERIFY_HEAP
3479static void VerifyAllBlackObjects(MemoryChunk* page) {
3480 LiveObjectIterator<kAllLiveObjects> it(page);
3481 HeapObject* object = NULL;
3482 while ((object = it.Next()) != NULL) {
3483 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3484 }
3485}
3486#endif // VERIFY_HEAP
3487
Ben Murdochc5610432016-08-08 18:44:38 +01003488template <class Visitor>
3489bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003490 IterationMode mode) {
3491#ifdef VERIFY_HEAP
3492 VerifyAllBlackObjects(page);
3493#endif // VERIFY_HEAP
3494
3495 LiveObjectIterator<kBlackObjects> it(page);
3496 HeapObject* object = nullptr;
3497 while ((object = it.Next()) != nullptr) {
3498 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3499 if (!visitor->Visit(object)) {
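      // The visitor bailed out (e.g. evacuation of this object failed). Only
      // the part of the page that was already processed gets its mark bits
      // and recorded new space slots cleared; the remainder stays intact so
      // the page can still be swept later.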
3500 if (mode == kClearMarkbits) {
3501 page->markbits()->ClearRange(
3502 page->AddressToMarkbitIndex(page->area_start()),
3503 page->AddressToMarkbitIndex(object->address()));
Ben Murdoch097c5b22016-05-18 11:27:45 +01003504 if (page->old_to_new_slots() != nullptr) {
3505 page->old_to_new_slots()->RemoveRange(
3506 0, static_cast<int>(object->address() - page->address()));
3507 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003508 RecomputeLiveBytes(page);
3509 }
3510 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003511 }
3512 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003513 if (mode == kClearMarkbits) {
3514 Bitmap::Clear(page);
3515 }
3516 return true;
3517}
3518
3519
3520void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3521 LiveObjectIterator<kBlackObjects> it(page);
3522 int new_live_size = 0;
3523 HeapObject* object = nullptr;
3524 while ((object = it.Next()) != nullptr) {
3525 new_live_size += object->Size();
3526 }
3527 page->SetLiveBytes(new_live_size);
3528}
3529
3530
3531void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3532 ObjectVisitor* visitor) {
3533#ifdef VERIFY_HEAP
3534 VerifyAllBlackObjects(page);
3535#endif // VERIFY_HEAP
3536
3537 LiveObjectIterator<kBlackObjects> it(page);
3538 HeapObject* object = NULL;
3539 while ((object = it.Next()) != NULL) {
3540 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3541 Map* map = object->synchronized_map();
3542 int size = object->SizeFromMap(map);
3543 object->IterateBody(map->instance_type(), size, visitor);
3544 }
3545}
3546
Ben Murdochc5610432016-08-08 18:44:38 +01003547void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
3548 Page* page) {
3549 base::LockGuard<base::Mutex> guard(&mutex_);
3550 swept_list_[space->identity()].Add(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003551}
3552
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003553void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Ben Murdochda12d292016-06-02 14:46:10 +01003554 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003555 Heap::RelocationLock relocation_lock(heap());
3556
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003557 {
Ben Murdochda12d292016-06-02 14:46:10 +01003558 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003559 EvacuationScope evacuation_scope(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003560
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003561 EvacuateNewSpacePrologue();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003562 EvacuatePagesInParallel();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003563 heap()->new_space()->set_age_mark(heap()->new_space()->top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003564 }
3565
3566 UpdatePointersAfterEvacuation();
3567
Ben Murdoch61f157c2016-09-16 13:49:30 +01003568 if (!heap()->new_space()->Rebalance()) {
3569 FatalProcessOutOfMemory("NewSpace::Rebalance");
3570 }
3571
Ben Murdoch097c5b22016-05-18 11:27:45 +01003572 // Give pages that are queued to be freed back to the OS. Note that filtering
3573 // slots only handles old space (for unboxed doubles), and thus map space can
3574 // still contain stale pointers. We only free the chunks after pointer updates
3575 // to still have access to page headers.
Ben Murdochc5610432016-08-08 18:44:38 +01003576 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003577
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003578 {
Ben Murdochda12d292016-06-02 14:46:10 +01003579 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
Ben Murdochc5610432016-08-08 18:44:38 +01003580
Ben Murdoch61f157c2016-09-16 13:49:30 +01003581 for (Page* p : newspace_evacuation_candidates_) {
3582 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3583 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3584 sweeper().AddLatePage(p->owner()->identity(), p);
3585 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3586 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3587 p->ForAllFreeListCategories(
3588 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3589 sweeper().AddLatePage(p->owner()->identity(), p);
3590 }
3591 }
3592 newspace_evacuation_candidates_.Rewind(0);
3593
Ben Murdochc5610432016-08-08 18:44:38 +01003594 for (Page* p : evacuation_candidates_) {
3595 // Important: skip list should be cleared only after roots were updated
3596 // because root iteration traverses the stack and might have to find
3597 // code objects from non-updated pc pointing into evacuation candidate.
3598 SkipList* list = p->skip_list();
3599 if (list != NULL) list->Clear();
3600 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3601 sweeper().AddLatePage(p->owner()->identity(), p);
3602 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3603 }
3604 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003605
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003606 // Deallocate evacuated candidate pages.
3607 ReleaseEvacuationCandidates();
3608 }
3609
3610#ifdef VERIFY_HEAP
Ben Murdochc5610432016-08-08 18:44:38 +01003611 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003612 VerifyEvacuation(heap());
3613 }
3614#endif
3615}
3616
template <PointerDirection direction>
class PointerUpdateJobTraits {
 public:
  typedef int PerPageData;  // Per page data is not used in this job.
  typedef int PerTaskData;  // Per task data is not used in this job.

  static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
                                    PerPageData) {
    UpdateUntypedPointers(heap, chunk);
    UpdateTypedPointers(heap, chunk);
    return true;
  }
  static const bool NeedSequentialFinalization = false;
  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
  }

 private:
  static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
    if (direction == OLD_TO_NEW) {
      RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
        return CheckAndUpdateOldToNewSlot(heap, slot);
      });
    } else {
      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
        return UpdateSlot(reinterpret_cast<Object**>(slot));
      });
    }
  }

  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
    if (direction == OLD_TO_OLD) {
      Isolate* isolate = heap->isolate();
      RememberedSet<OLD_TO_OLD>::IterateTyped(
          chunk, [isolate](SlotType type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
                                                          UpdateSlot);
          });
    } else {
      Isolate* isolate = heap->isolate();
      RememberedSet<OLD_TO_NEW>::IterateTyped(
          chunk,
          [isolate, heap](SlotType type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                isolate, type, slot, [heap](Object** slot) {
                  return CheckAndUpdateOldToNewSlot(
                      heap, reinterpret_cast<Address>(slot));
                });
          });
    }
  }

  static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
                                                       Address slot_address) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap->InFromSpace(*slot)) {
      HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
      DCHECK(heap_object->IsHeapObject());
      MapWord map_word = heap_object->map_word();
      // There could still be stale pointers in large object space, map space,
      // and old space for pages that have been promoted.
      if (map_word.IsForwardingAddress()) {
        // Update the corresponding slot.
        *slot = map_word.ToForwardingAddress();
      }
      // If the object was in from space before the callback and is in to space
      // afterwards, the object is still live. Unfortunately, we know nothing
      // about the slot itself; it could be located in a just freed free space
      // object.
      if (heap->InToSpace(*slot)) {
        return KEEP_SLOT;
      }
    } else if (heap->InToSpace(*slot)) {
      DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
                 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
      // Slots can be in "to" space after a page has been moved. Since no
      // forwarding information is present, we need to check the mark bits to
      // determine liveness.
      if (Marking::IsBlack(
              Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
        return KEEP_SLOT;
    } else {
      DCHECK(!heap->InNewSpace(*slot));
    }
    return REMOVE_SLOT;
  }
};

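// Heuristic for how many tasks to spawn for a pointer update job: one task
// per kPagesPerTask pages, rounded up and capped at kMaxTasks, or a single
// task when FLAG_parallel_pointer_update is off. For example, 10 pages give
// (10 + 3) / 4 = 3 tasks, while 100 pages are capped at 4 tasks.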
int NumberOfPointerUpdateTasks(int pages) {
  if (!FLAG_parallel_pointer_update) return 1;
  const int kMaxTasks = 4;
  const int kPagesPerTask = 4;
  return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
}

template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
  PageParallelJob<PointerUpdateJobTraits<direction> > job(
      heap, heap->isolate()->cancelable_task_manager(), semaphore);
  RememberedSet<direction>::IterateMemoryChunks(
      heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
  int num_pages = job.NumberOfPages();
  int num_tasks = NumberOfPointerUpdateTasks(num_pages);
  job.Run(num_tasks, [](int i) { return 0; });
}

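// Traits for the job that updates pointers inside to-space objects after
// evacuation. Pages promoted within new space still contain garbage, so they
// are iterated via the mark bits (live objects only); all other pages are
// walked linearly between the per-page limits.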
class ToSpacePointerUpdateJobTraits {
 public:
  typedef std::pair<Address, Address> PerPageData;
  typedef PointersUpdatingVisitor* PerTaskData;

  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
                                    MemoryChunk* chunk, PerPageData limits) {
    if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
      // New->new promoted pages contain garbage so they require iteration
      // using markbits.
      ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
    } else {
      ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
    }
    return true;
  }

  static const bool NeedSequentialFinalization = false;
  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
  }

 private:
  static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
                                            MemoryChunk* chunk,
                                            PerPageData limits) {
    for (Address cur = limits.first; cur < limits.second;) {
      HeapObject* object = HeapObject::FromAddress(cur);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, visitor);
      cur += size;
    }
  }

  static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
                                             MemoryChunk* chunk,
                                             PerPageData limits) {
    LiveObjectIterator<kBlackObjects> it(chunk);
    HeapObject* object = NULL;
    while ((object = it.Next()) != NULL) {
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, visitor);
    }
  }
};

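// Creates one work item per to-space page between new_space()->bottom() and
// new_space()->top(); the limits are clamped so that only the allocated part
// of the first and last page is visited. All tasks share a single
// PointersUpdatingVisitor.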
void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
  PageParallelJob<ToSpacePointerUpdateJobTraits> job(
      heap, heap->isolate()->cancelable_task_manager(), semaphore);
  Address space_start = heap->new_space()->bottom();
  Address space_end = heap->new_space()->top();
  for (Page* page : NewSpacePageRange(space_start, space_end)) {
    Address start =
        page->Contains(space_start) ? space_start : page->area_start();
    Address end = page->Contains(space_end) ? space_end : page->area_end();
    job.AddPage(page, std::make_pair(start, end));
  }
  PointersUpdatingVisitor visitor;
  int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}

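// Pointer updating after evacuation proceeds in three phases: (1) update
// pointers in to-space objects, in the roots, and in OLD_TO_NEW slots;
// (2) update OLD_TO_OLD slots that point into evacuated pages; (3) update
// weak references, including the external string table.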
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
  }

  {
    Heap* heap = this->heap();
    TRACE_GC(heap->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
    UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    // Update pointers from the external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  for (Page* p : evacuation_candidates_) {
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    p->ResetLiveBytes();
    CHECK(p->SweepingDone());
    space->ReleasePage(p);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

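// Sweeps pages of the given space until the largest freed block seen on some
// page reaches required_freed_bytes, or until max_pages pages have been
// swept; a value of 0 disables the respective limit. Returns the maximum
// per-page result of ParallelSweepPage observed.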
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                                      int required_freed_bytes,
                                                      int max_pages) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, identity);
    pages_freed += 1;
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    if (max_pages > 0 && pages_freed >= max_pages) return max_freed;
  }
  return max_freed;
}

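// Sweeps a single page while holding the page's mutex so that two sweeper
// tasks never process the same page; if the page's state is no longer
// kSweepingPending it was already swept in the meantime and is skipped. The
// RawSweep configuration depends on the owning space: new space ignores the
// free list, code space additionally rebuilds the skip list, and all other
// paged spaces rebuild their free list.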
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                     AllocationSpace identity) {
  int max_freed = 0;
  if (page->mutex()->TryLock()) {
    // If this page was already swept in the meantime, we can return here.
    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
      page->mutex()->Unlock();
      return 0;
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    if (identity == NEW_SPACE) {
      RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
               IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
    } else if (identity == OLD_SPACE) {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
          heap_->paged_space(identity), page, nullptr);
    } else if (identity == CODE_SPACE) {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
          heap_->paged_space(identity), page, nullptr);
    } else {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                           REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
          heap_->paged_space(identity), page, nullptr);
    }
    {
      base::LockGuard<base::Mutex> guard(&mutex_);
      swept_list_[identity].Add(page);
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
    page->mutex()->Unlock();
  }
  return max_freed;
}

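// AddPage may only be called before concurrent sweeping has started and fills
// the per-space sweeping list directly; AddLatePage is used once sweeping is
// already in progress, goes through the mutex-protected AddSweepingPageSafe,
// and records that late pages exist.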
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
  DCHECK(!sweeping_in_progress_);
  PrepareToBeSweptPage(space, page);
  sweeping_list_[space].push_back(page);
}

void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
                                                Page* page) {
  DCHECK(sweeping_in_progress_);
  PrepareToBeSweptPage(space, page);
  late_pages_ = true;
  AddSweepingPageSafe(space, page);
}

void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                         Page* page) {
  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
  int to_sweep = page->area_size() - page->LiveBytes();
  if (space != NEW_SPACE) {
    heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
  }
}

Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
    AllocationSpace space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  Page* page = nullptr;
  if (!sweeping_list_[space].empty()) {
    page = sweeping_list_[space].front();
    sweeping_list_[space].pop_front();
  }
  return page;
}

void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
                                                        Page* page) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  sweeping_list_[space].push_back(page);
}

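// Prepares a paged space for sweeping: evacuation candidates are handled by
// the evacuation phase, black pages only have their mark bits cleared and the
// area above the high water mark freed, NEVER_ALLOCATE_ON_PAGE pages are
// swept eagerly on the main thread to stay iterable, at most one completely
// empty page is kept, and all remaining pages are queued on the sweeper.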
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  Address space_top = space->top();
  space->ClearStats();

  int will_be_swept = 0;
  bool unused_page_present = false;

  // The loop needs to support deletion of pages whose live bytes are zero.
  for (auto it = space->begin(); it != space->end();) {
    Page* p = *(it++);
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      DCHECK(evacuation_candidates_.length() > 0);
      continue;
    }

    // We cannot sweep black pages, since all mark bits are set for these
    // pages.
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      Bitmap::Clear(p);
      p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
      p->ClearFlag(Page::BLACK_PAGE);
      // The area above the high water mark is free.
      Address free_start = p->HighWaterMark();
      // Check if the space top was in this page, which means that the
      // high water mark is not up-to-date.
      if (free_start < space_top && space_top <= p->area_end()) {
        free_start = space_top;
      }
      int size = static_cast<int>(p->area_end() - free_start);
      space->Free(free_start, size);
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory into the free list that is later on
      // (in the free list) dropped again. Since we only use the flag for
      // testing this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
                        Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
      continue;
    }

    // One unused page is kept; all further unused pages are released before
    // sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p",
                       static_cast<void*>(p));
        }
        ArrayBufferTracker::FreeAll(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    sweeper().AddPage(space->identity(), p);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
}


void MarkCompactCollector::SweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = heap_->MonotonicallyIncreasingTimeInMs();
  }

#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    sweeper().StartSweeping();
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                     start_time);
  }
}

Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}

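// Records a code entry slot in the typed OLD_TO_OLD remembered set when the
// target code object lives on an evacuation candidate and slot recording is
// not skipped for the host object.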
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                               Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(host)) {
    // TODO(ulan): remove this check after investigating crbug.com/414964.
    CHECK(target->IsCode());
    RememberedSet<OLD_TO_OLD>::InsertTyped(
        source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(host, &rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8