// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


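// Objects are marked with a pair of adjacent bits in the page's marking
// bitmap. The string constants below document that encoding (white =
// unmarked, grey = marked but not yet scanned, black = marked and scanned)
// and are cross-checked by DCHECKs in MarkCompactCollector::SetUp().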
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";


// The following has to hold in order for {Marking::MarkBitFrom} not to produce
// an invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      evacuation_(false),
      heap_(heap),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(0),
      code_flusher_(nullptr),
      have_code_to_deoptimize_(false),
      compacting_(false),
      sweeping_in_progress_(false),
      pending_sweeper_tasks_semaphore_(0),
      pending_compaction_tasks_semaphore_(0) {
}

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) override {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
      // The next word surely belongs to the current object, so jump over it.
      current += kPointerSize;
    }
  }
}

static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
  CHECK(page->IsFlagSet(Page::BLACK_PAGE));
  VerifyMarkingVisitor visitor(heap);
  HeapObjectIterator it(page);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    object->Iterate(&visitor);
  }
}

static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      VerifyMarkingBlackPage(space->heap(), p);
    } else {
      VerifyMarking(space->heap(), p->area_start(), p->area_end());
    }
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
    return;
  }
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
      PrintF("[code-flushing is now on]\n");
    }
  }
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
  delete marking_deque_memory_;
  delete code_flusher_;
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}

void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
  }
// There is no need to filter the old to old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
  }
#endif
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  EvacuateNewSpaceAndCandidates();

  Finish();
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    Bitmap::Clear(p);
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      p->ClearFlag(Page::BLACK_PAGE);
    }
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Marking::MarkWhite(Marking::MarkBitFrom(obj));
    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    chunk->ResetProgressBar();
    chunk->ResetLiveBytes();
    if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
      chunk->ClearFlag(Page::BLACK_PAGE);
    }
  }
}


class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, AllocationSpace space_to_start)
      : heap_(heap), space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
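    // Sweep every paged space, starting with the space this task was created
    // for and wrapping around, so that concurrently running tasks begin their
    // work on different spaces.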
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      heap_->mark_compact_collector()->SweepInParallel(
          heap_->paged_space(space_id), 0);
    }
    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
  }

  Heap* heap_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};


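// Posts one concurrent SweeperTask per paged space (old, code, and map space).
// Each task signals pending_sweeper_tasks_semaphore_ when it is done, and
// EnsureSweepingCompleted() below waits once per task.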
void MarkCompactCollector::StartSweeperThreads() {
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
}


void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
  if (!page->SweepingDone()) {
    SweepInParallel(page, owner);
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}


void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(space->identity()), 0);
    space->RefillFreeList();
  }
}


void MarkCompactCollector::EnsureSweepingCompleted() {
  DCHECK(sweeping_in_progress_ == true);

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
  }

  if (FLAG_concurrent_sweeping) {
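    // Wait for each of the three sweeper tasks posted in StartSweeperThreads()
    // (one per paged space) to signal completion.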
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
  }

  ParallelSweepSpacesComplete();
  sweeping_in_progress_ = false;
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    VerifyEvacuation(heap_);
  }
#endif
}


bool MarkCompactCollector::IsSweepingCompleted() {
  if (!pending_sweeper_tasks_semaphore_.WaitFor(
          base::TimeDelta::FromSeconds(0))) {
    return false;
  }
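  // The zero-timeout WaitFor() above consumed one completion token; re-signal
  // so the blocking Wait() calls in EnsureSweepingCompleted() still see it.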
  pending_sweeper_tasks_semaphore_.Signal();
  return true;
}


void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking() ||
      Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
    return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


void MarkCompactCollector::ComputeEvacuationHeuristics(
    int area_size, int* target_fragmentation_percent,
    int* max_evacuated_bytes) {
  // For memory reducing mode we directly define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const int kTargetMsPerArea = 1;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
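      // With estimated_ms_per_area ~= 1 + area_size / speed, allowing a live
      // fraction of kTargetMsPerArea / estimated_ms_per_area bounds the time
      // needed to evacuate one page's live data at roughly kTargetMsPerArea
      // ms; the fragmentation threshold is 100% minus that live fraction.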
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  int area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<int, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    if (p->NeverEvacuate()) continue;
    if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
    // Invariant: Evacuation candidates are only created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->old_to_old_slots());
    CHECK_NULL(p->typed_old_to_old_slots());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
  }

  int candidate_count = 0;
  int total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an evacuation
    // candidate, or not:
    // * Target fragmentation: How fragmented a page is, i.e., the ratio of
    //   live bytes to the capacity (area) of the page.
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to the
    // set of evacuation candidates as long as both conditions (fragmentation
    // and quota) hold.
    int max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const intptr_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);
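    // A page only qualifies if at least target_fragmentation_percent of its
    // area is free, i.e. its free bytes reach this threshold.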

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      int live_bytes = pages[i].first;
      int free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%d "
                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
                     "sum_compaction_kb=%d "
                     "compaction_limit_kb=%d\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%d\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();
  }

  // If concurrent unmapping tasks are still running, we should wait for
  // them here.
  heap()->WaitUntilUnmappingOfFreeChunksCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    ClearMarkbits();
    AbortWeakCollections();
    AbortWeakCells();
    AbortTransitionArrays();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  // The hashing of weak_object_to_code_table is no longer valid.
  heap()->weak_object_to_code_table()->Rehash(
      heap()->isolate()->factory()->undefined_value());

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

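// In outline, the overflow handling amounts to a loop of roughly this shape
// (an illustrative sketch only; the real control flow lives in
// ProcessMarkingDeque() and the marking-deque helpers of this collector):
//
//   EmptyMarkingDeque();
//   while (marking_deque()->overflowed()) {
//     RefillMarkingDeque();  // rescan the heap for overflowed objects
//     EmptyMarkingDeque();
//   }
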
924void CodeFlusher::ProcessJSFunctionCandidates() {
925 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
926 Object* undefined = isolate_->heap()->undefined_value();
927
928 JSFunction* candidate = jsfunction_candidates_head_;
929 JSFunction* next_candidate;
930 while (candidate != NULL) {
931 next_candidate = GetNextCandidate(candidate);
932 ClearNextCandidate(candidate, undefined);
933
934 SharedFunctionInfo* shared = candidate->shared();
935
936 Code* code = shared->code();
937 MarkBit code_mark = Marking::MarkBitFrom(code);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000938 if (Marking::IsWhite(code_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000939 if (FLAG_trace_code_flushing && shared->is_compiled()) {
940 PrintF("[code-flushing clears: ");
941 shared->ShortPrint();
942 PrintF(" - age: %d]\n", code->GetAge());
943 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000944 // Always flush the optimized code map if there is one.
945 if (!shared->OptimizedCodeMapIsCleared()) {
946 shared->ClearOptimizedCodeMap();
947 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000948 shared->set_code(lazy_compile);
949 candidate->set_code(lazy_compile);
950 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000951 DCHECK(Marking::IsBlack(code_mark));
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000952 candidate->set_code(code);
953 }
954
955 // We are in the middle of a GC cycle so the write barrier in the code
956 // setter did not record the slot update and we have to do that manually.
957 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
958 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000959 isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
960 candidate, slot, target);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000961
962 Object** shared_code_slot =
963 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
964 isolate_->heap()->mark_compact_collector()->RecordSlot(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000965 shared, shared_code_slot, *shared_code_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000966
967 candidate = next_candidate;
968 }
969
970 jsfunction_candidates_head_ = NULL;
971}
972
973
974void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
975 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
976
977 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
978 SharedFunctionInfo* next_candidate;
979 while (candidate != NULL) {
980 next_candidate = GetNextCandidate(candidate);
981 ClearNextCandidate(candidate);
982
983 Code* code = candidate->code();
984 MarkBit code_mark = Marking::MarkBitFrom(code);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000985 if (Marking::IsWhite(code_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000986 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
987 PrintF("[code-flushing clears: ");
988 candidate->ShortPrint();
989 PrintF(" - age: %d]\n", code->GetAge());
990 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000991 // Always flush the optimized code map if there is one.
992 if (!candidate->OptimizedCodeMapIsCleared()) {
993 candidate->ClearOptimizedCodeMap();
994 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000995 candidate->set_code(lazy_compile);
996 }
997
998 Object** code_slot =
999 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001000 isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001001 *code_slot);
1002
1003 candidate = next_candidate;
1004 }
1005
1006 shared_function_info_candidates_head_ = NULL;
1007}
1008
1009
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001010void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1011 // Make sure previous flushing decisions are revisited.
Ben Murdochda12d292016-06-02 14:46:10 +01001012 isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001013
1014 if (FLAG_trace_code_flushing) {
1015 PrintF("[code-flushing abandons function-info: ");
1016 shared_info->ShortPrint();
1017 PrintF("]\n");
1018 }
1019
1020 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1021 SharedFunctionInfo* next_candidate;
1022 if (candidate == shared_info) {
1023 next_candidate = GetNextCandidate(shared_info);
1024 shared_function_info_candidates_head_ = next_candidate;
1025 ClearNextCandidate(shared_info);
1026 } else {
1027 while (candidate != NULL) {
1028 next_candidate = GetNextCandidate(candidate);
1029
1030 if (next_candidate == shared_info) {
1031 next_candidate = GetNextCandidate(shared_info);
1032 SetNextCandidate(candidate, next_candidate);
1033 ClearNextCandidate(shared_info);
1034 break;
1035 }
1036
1037 candidate = next_candidate;
1038 }
1039 }
1040}
1041
1042
1043void CodeFlusher::EvictCandidate(JSFunction* function) {
1044 DCHECK(!function->next_function_link()->IsUndefined());
1045 Object* undefined = isolate_->heap()->undefined_value();
1046
1047 // Make sure previous flushing decisions are revisited.
Ben Murdochda12d292016-06-02 14:46:10 +01001048 isolate_->heap()->incremental_marking()->IterateBlackObject(function);
1049 isolate_->heap()->incremental_marking()->IterateBlackObject(
1050 function->shared());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001051
1052 if (FLAG_trace_code_flushing) {
1053 PrintF("[code-flushing abandons closure: ");
1054 function->shared()->ShortPrint();
1055 PrintF("]\n");
1056 }
1057
1058 JSFunction* candidate = jsfunction_candidates_head_;
1059 JSFunction* next_candidate;
1060 if (candidate == function) {
1061 next_candidate = GetNextCandidate(function);
1062 jsfunction_candidates_head_ = next_candidate;
1063 ClearNextCandidate(function, undefined);
1064 } else {
1065 while (candidate != NULL) {
1066 next_candidate = GetNextCandidate(candidate);
1067
1068 if (next_candidate == function) {
1069 next_candidate = GetNextCandidate(function);
1070 SetNextCandidate(candidate, next_candidate);
1071 ClearNextCandidate(function, undefined);
1072 break;
1073 }
1074
1075 candidate = next_candidate;
1076 }
1077 }
1078}
1079
1080
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001081void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1082 Heap* heap = isolate_->heap();
1083
1084 JSFunction** slot = &jsfunction_candidates_head_;
1085 JSFunction* candidate = jsfunction_candidates_head_;
1086 while (candidate != NULL) {
1087 if (heap->InFromSpace(candidate)) {
1088 v->VisitPointer(reinterpret_cast<Object**>(slot));
1089 }
1090 candidate = GetNextCandidate(*slot);
1091 slot = GetNextCandidateSlot(*slot);
1092 }
1093}
1094
1095
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001096class MarkCompactMarkingVisitor
1097 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1098 public:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001099 static void Initialize();
1100
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001101 INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1102 MarkObjectByPointer(heap->mark_compact_collector(), object, p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001103 }
1104
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001105 INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1106 Object** start, Object** end)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001107 // Mark all objects pointed to in [start, end).
1108 const int kMinRangeForMarkingRecursion = 64;
1109 if (end - start >= kMinRangeForMarkingRecursion) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001110 if (VisitUnmarkedObjects(heap, object, start, end)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001111 // We are close to a stack overflow, so just mark the objects.
1112 }
1113 MarkCompactCollector* collector = heap->mark_compact_collector();
1114 for (Object** p = start; p < end; p++) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001115 MarkObjectByPointer(collector, object, p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001116 }
1117 }
1118
1119 // Marks the object black and pushes it on the marking stack.
1120 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1121 MarkBit mark = Marking::MarkBitFrom(object);
1122 heap->mark_compact_collector()->MarkObject(object, mark);
1123 }
1124
1125 // Marks the object black without pushing it on the marking stack.
1126 // Returns true if object needed marking and false otherwise.
1127 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1128 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001129 if (Marking::IsWhite(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001130 heap->mark_compact_collector()->SetMark(object, mark_bit);
1131 return true;
1132 }
1133 return false;
1134 }
1135
1136 // Mark object pointed to by p.
1137 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001138 HeapObject* object, Object** p)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001139 if (!(*p)->IsHeapObject()) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001140 HeapObject* target_object = HeapObject::cast(*p);
1141 collector->RecordSlot(object, p, target_object);
1142 MarkBit mark = Marking::MarkBitFrom(target_object);
1143 collector->MarkObject(target_object, mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001144 }
1145
1146
1147 // Visit an unmarked object.
1148 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1149 HeapObject* obj)) {
1150#ifdef DEBUG
1151 DCHECK(collector->heap()->Contains(obj));
1152 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1153#endif
1154 Map* map = obj->map();
1155 Heap* heap = obj->GetHeap();
1156 MarkBit mark = Marking::MarkBitFrom(obj);
1157 heap->mark_compact_collector()->SetMark(obj, mark);
1158 // Mark the map pointer and the body.
1159 MarkBit map_mark = Marking::MarkBitFrom(map);
1160 heap->mark_compact_collector()->MarkObject(map, map_mark);
1161 IterateBody(map, obj);
1162 }
1163
1164 // Visit all unmarked objects pointed to by [start, end).
1165 // Returns false if the operation fails (lack of stack space).
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001166 INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1167 Object** start, Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(object, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (Marking::IsBlackOrGrey(mark)) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);

      // Saving a copy might create a pointer into compaction candidate
      // that was not observed by marker. This might happen if JSRegExp data
      // was marked through the compilation cache before marker reached JSRegExp
      // object.
      FixedArray* data = FixedArray::cast(re->data());
      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
        Object** slot =
            data->data_start() + JSRegExp::saved_code_index(is_one_byte);
        heap->mark_compact_collector()->RecordSlot(data, slot, code);
      }

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_one_byte),
                    Smi::FromInt(heap->ms_count() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
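      // The stored smi is the ms_count (mod 256) recorded the last time the
      // compiled code was seen on this RegExp; once kRegExpCodeThreshold
      // mark-sweep GCs have passed without it being reinstated, flush the
      // saved code.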
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001231 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001232 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1233 Smi::FromInt(JSRegExp::kUninitializedValue));
1234 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1235 Smi::FromInt(JSRegExp::kUninitializedValue));
1236 }
1237 }
1238 }
1239
1240
1241 // Works by setting the current sweep_generation (as a smi) in the
1242 // code object place in the data array of the RegExp and keeps a copy
1243 // around that can be reinstated if we reuse the RegExp before flushing.
1244 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1245 // we flush the code.
1246 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1247 Heap* heap = map->GetHeap();
1248 MarkCompactCollector* collector = heap->mark_compact_collector();
1249 if (!collector->is_code_flushing_enabled()) {
1250 VisitJSRegExp(map, object);
1251 return;
1252 }
1253 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1254 // Flush code or set age on both one byte and two byte code.
1255 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1256 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1257 // Visit the fields of the RegExp, including the updated FixedArray.
1258 VisitJSRegExp(map, object);
1259 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001260};
1261
1262
1263void MarkCompactMarkingVisitor::Initialize() {
1264 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1265
1266 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1267
1268 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001269 ObjectStatsVisitor::Initialize(&table_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001270 }
1271}
1272
1273
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001274class CodeMarkingVisitor : public ThreadVisitor {
1275 public:
1276 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1277 : collector_(collector) {}
1278
1279 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1280 collector_->PrepareThreadForCodeFlushing(isolate, top);
1281 }
1282
1283 private:
1284 MarkCompactCollector* collector_;
1285};
1286
1287
1288class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1289 public:
1290 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1291 : collector_(collector) {}
1292
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001293 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001294 for (Object** p = start; p < end; p++) VisitPointer(p);
1295 }
1296
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001297 void VisitPointer(Object** slot) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001298 Object* obj = *slot;
1299 if (obj->IsSharedFunctionInfo()) {
1300 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1301 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1302 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1303 collector_->MarkObject(shared->code(), code_mark);
1304 collector_->MarkObject(shared, shared_mark);
1305 }
1306 }
1307
1308 private:
1309 MarkCompactCollector* collector_;
1310};
1311
1312
1313void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1314 ThreadLocalTop* top) {
1315 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1316 // Note: for the frame that has a pending lazy deoptimization
1317 // StackFrame::unchecked_code will return a non-optimized code object for
1318 // the outermost function and StackFrame::LookupCode will return
1319 // actual optimized code object.
1320 StackFrame* frame = it.frame();
1321 Code* code = frame->unchecked_code();
1322 MarkBit code_mark = Marking::MarkBitFrom(code);
1323 MarkObject(code, code_mark);
1324 if (frame->is_optimized()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001325 Code* optimized_code = frame->LookupCode();
1326 MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1327 MarkObject(optimized_code, optimized_code_mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001328 }
1329 }
1330}
1331
1332
1333void MarkCompactCollector::PrepareForCodeFlushing() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001334 // If code flushing is disabled, there is no need to prepare for it.
1335 if (!is_code_flushing_enabled()) return;
1336
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001337 // Make sure we are not referencing the code from the stack.
1338 DCHECK(this == heap()->mark_compact_collector());
1339 PrepareThreadForCodeFlushing(heap()->isolate(),
1340 heap()->isolate()->thread_local_top());
1341
1342 // Iterate the archived stacks in all threads to check if
1343 // the code is referenced.
1344 CodeMarkingVisitor code_marking_visitor(this);
1345 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1346 &code_marking_visitor);
1347
1348 SharedFunctionInfoMarkingVisitor visitor(this);
1349 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1350 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1351
1352 ProcessMarkingDeque();
1353}
1354
1355
1356// Visitor class for marking heap roots.
1357class RootMarkingVisitor : public ObjectVisitor {
1358 public:
1359 explicit RootMarkingVisitor(Heap* heap)
1360 : collector_(heap->mark_compact_collector()) {}
1361
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001362 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001363
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001364 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001365 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1366 }
1367
1368 // Skip the weak next code link in a code object, which is visited in
1369 // ProcessTopOptimizedFrame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001370 void VisitNextCodeLink(Object** p) override {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001371
1372 private:
1373 void MarkObjectByPointer(Object** p) {
1374 if (!(*p)->IsHeapObject()) return;
1375
1376    // Bail out early if the object has already been marked.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001377 HeapObject* object = HeapObject::cast(*p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001378 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001379 if (Marking::IsBlackOrGrey(mark_bit)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001380
1381 Map* map = object->map();
1382 // Mark the object.
1383 collector_->SetMark(object, mark_bit);
1384
1385 // Mark the map pointer and body, and push them on the marking stack.
1386 MarkBit map_mark = Marking::MarkBitFrom(map);
1387 collector_->MarkObject(map, map_mark);
1388 MarkCompactMarkingVisitor::IterateBody(map, object);
1389
1390 // Mark all the objects reachable from the map and body. May leave
1391 // overflowed objects in the heap.
1392 collector_->EmptyMarkingDeque();
1393 }
1394
1395 MarkCompactCollector* collector_;
1396};
1397
1398
1399// Helper class for pruning the string table and finalizing external strings.
Ben Murdochda12d292016-06-02 14:46:10 +01001400template <bool finalize_external_strings, bool record_slots>
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001401class StringTableCleaner : public ObjectVisitor {
1402 public:
Ben Murdochda12d292016-06-02 14:46:10 +01001403 StringTableCleaner(Heap* heap, HeapObject* table)
1404 : heap_(heap), pointers_removed_(0), table_(table) {
1405 DCHECK(!record_slots || table != nullptr);
1406 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001407
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001408 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001409 // Visit all HeapObject pointers in [start, end).
Ben Murdochda12d292016-06-02 14:46:10 +01001410 MarkCompactCollector* collector = heap_->mark_compact_collector();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001411 for (Object** p = start; p < end; p++) {
1412 Object* o = *p;
Ben Murdochda12d292016-06-02 14:46:10 +01001413 if (o->IsHeapObject()) {
1414 if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1415 if (finalize_external_strings) {
1416 DCHECK(o->IsExternalString());
1417 heap_->FinalizeExternalString(String::cast(*p));
1418 } else {
1419 pointers_removed_++;
1420 }
1421 // Set the entry to the_hole_value (as deleted).
1422 *p = heap_->the_hole_value();
1423 } else if (record_slots) {
1424 // StringTable contains only old space strings.
1425 DCHECK(!heap_->InNewSpace(o));
1426 collector->RecordSlot(table_, p, o);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001427 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001428 }
1429 }
1430 }
1431
1432 int PointersRemoved() {
1433 DCHECK(!finalize_external_strings);
1434 return pointers_removed_;
1435 }
1436
1437 private:
1438 Heap* heap_;
1439 int pointers_removed_;
Ben Murdochda12d292016-06-02 14:46:10 +01001440 HeapObject* table_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001441};
1442
Ben Murdochda12d292016-06-02 14:46:10 +01001443typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1444typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
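
// Illustrative sketch (not part of V8; the names below are made up): the
// cleaner above is essentially a pruning pass over a table of tagged
// pointers. Stripped of mark bits and slot recording, the core loop looks
// like this, with a caller-supplied liveness predicate and hole value:
namespace {

// Replaces every dead entry in entries[0..length) with `hole` and returns how
// many entries were cleared.
template <typename Entry, typename IsLive>
int PruneTable(Entry** entries, int length, Entry* hole, IsLive is_live) {
  int removed = 0;
  for (int i = 0; i < length; i++) {
    Entry* e = entries[i];
    if (e == hole) continue;  // Slot was already deleted.
    if (!is_live(e)) {
      entries[i] = hole;  // Delete the entry.
      removed++;
    }
  }
  return removed;
}

}  // namespace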
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001445
1446// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1447// are retained.
1448class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1449 public:
1450 virtual Object* RetainAs(Object* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001451 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1452 DCHECK(!Marking::IsGrey(mark_bit));
1453 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001454 return object;
1455 } else if (object->IsAllocationSite() &&
1456 !(AllocationSite::cast(object)->IsZombie())) {
1457 // "dead" AllocationSites need to live long enough for a traversal of new
1458 // space. These sites get a one-time reprieve.
1459 AllocationSite* site = AllocationSite::cast(object);
1460 site->MarkZombie();
1461 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1462 return object;
1463 } else {
1464 return NULL;
1465 }
1466 }
1467};
1468
1469
1470// Fill the marking stack with overflowed objects returned by the given
1471// iterator. Stop when the marking stack is filled or the end of the space
1472// is reached, whichever comes first.
1473template <class T>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001474void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001475 // The caller should ensure that the marking stack is initially not full,
1476 // so that we don't waste effort pointlessly scanning for objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001477 DCHECK(!marking_deque()->IsFull());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001478
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001479 Map* filler_map = heap()->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001480 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1481 MarkBit markbit = Marking::MarkBitFrom(object);
1482 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1483 Marking::GreyToBlack(markbit);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001484 PushBlack(object);
1485 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001486 }
1487 }
1488}
1489
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001490void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1491 DCHECK(!marking_deque()->IsFull());
1492 LiveObjectIterator<kGreyObjects> it(p);
1493 HeapObject* object = NULL;
1494 while ((object = it.Next()) != NULL) {
1495 MarkBit markbit = Marking::MarkBitFrom(object);
1496 DCHECK(Marking::IsGrey(markbit));
1497 Marking::GreyToBlack(markbit);
1498 PushBlack(object);
1499 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001500 }
1501}
1502
Ben Murdochda12d292016-06-02 14:46:10 +01001503class RecordMigratedSlotVisitor final : public ObjectVisitor {
1504 public:
1505 inline void VisitPointer(Object** p) final {
1506 RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1507 }
1508
1509 inline void VisitPointers(Object** start, Object** end) final {
1510 while (start < end) {
1511 RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1512 ++start;
1513 }
1514 }
1515
1516 inline void VisitCodeEntry(Address code_entry_slot) final {
1517 Address code_entry = Memory::Address_at(code_entry_slot);
1518 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1519 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1520 CODE_ENTRY_SLOT, code_entry_slot);
1521 }
1522 }
1523
1524 private:
1525 inline void RecordMigratedSlot(Object* value, Address slot) {
1526 if (value->IsHeapObject()) {
1527 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1528 if (p->InNewSpace()) {
1529 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1530 } else if (p->IsEvacuationCandidate()) {
1531 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1532 }
1533 }
1534 }
1535};
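
// Illustrative sketch (not part of V8; hypothetical names): the visitor above
// only has to decide which remembered set, if any, a migrated slot belongs
// in. The decision reduces to two page-level predicates:
namespace {

struct SlotClassifier {
  enum Kind { kNone, kOldToNew, kOldToOld };

  // A slot is remembered if its value lives in new space (old-to-new) or on
  // an evacuation candidate page and may therefore still move (old-to-old).
  static Kind Classify(bool value_in_new_space,
                       bool value_on_evacuation_candidate) {
    if (value_in_new_space) return kOldToNew;
    if (value_on_evacuation_candidate) return kOldToOld;
    return kNone;
  }
};

}  // namespace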
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001536
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001537class MarkCompactCollector::HeapObjectVisitor {
1538 public:
1539 virtual ~HeapObjectVisitor() {}
1540 virtual bool Visit(HeapObject* object) = 0;
1541};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001542
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001543class MarkCompactCollector::EvacuateVisitorBase
1544 : public MarkCompactCollector::HeapObjectVisitor {
Ben Murdochda12d292016-06-02 14:46:10 +01001545 protected:
1546 enum MigrationMode { kFast, kProfiled };
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001547
Ben Murdochda12d292016-06-02 14:46:10 +01001548 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1549 : heap_(heap),
1550 compaction_spaces_(compaction_spaces),
1551 profiling_(
1552 heap->isolate()->cpu_profiler()->is_profiling() ||
1553 heap->isolate()->logger()->is_logging_code_events() ||
1554 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1555
1556 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1557 HeapObject** target_object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001558 int size = object->Size();
1559 AllocationAlignment alignment = object->RequiredAlignment();
1560 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1561 if (allocation.To(target_object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01001562 MigrateObject(*target_object, object, size, target_space->identity());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001563 return true;
1564 }
1565 return false;
1566 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001567
Ben Murdochda12d292016-06-02 14:46:10 +01001568 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1569 AllocationSpace dest) {
1570 if (profiling_) {
1571 MigrateObject<kProfiled>(dst, src, size, dest);
1572 } else {
1573 MigrateObject<kFast>(dst, src, size, dest);
1574 }
1575 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001576
Ben Murdochda12d292016-06-02 14:46:10 +01001577 template <MigrationMode mode>
1578 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1579 AllocationSpace dest) {
1580 Address dst_addr = dst->address();
1581 Address src_addr = src->address();
1582 DCHECK(heap_->AllowedToBeMigrated(src, dest));
1583 DCHECK(dest != LO_SPACE);
1584 if (dest == OLD_SPACE) {
1585 DCHECK_OBJECT_SIZE(size);
1586 DCHECK(IsAligned(size, kPointerSize));
1587 heap_->CopyBlock(dst_addr, src_addr, size);
1588 if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
1589 PROFILE(heap_->isolate(),
1590 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1591 }
1592 RecordMigratedSlotVisitor visitor;
1593 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1594 } else if (dest == CODE_SPACE) {
1595 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1596 if (mode == kProfiled) {
1597 PROFILE(heap_->isolate(),
1598 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1599 }
1600 heap_->CopyBlock(dst_addr, src_addr, size);
1601 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
1602 RELOCATED_CODE_OBJECT, dst_addr);
1603 Code::cast(dst)->Relocate(dst_addr - src_addr);
1604 } else {
1605 DCHECK_OBJECT_SIZE(size);
1606 DCHECK(dest == NEW_SPACE);
1607 heap_->CopyBlock(dst_addr, src_addr, size);
1608 }
1609 if (mode == kProfiled) {
1610 heap_->OnMoveEvent(dst, src, size);
1611 }
1612 Memory::Address_at(src_addr) = dst_addr;
1613 }
1614
1615 Heap* heap_;
1616 CompactionSpaceCollection* compaction_spaces_;
1617 bool profiling_;
1618};
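
// Illustrative sketch (not part of V8; hypothetical names, assuming
// word-aligned, word-sized payloads): evacuation boils down to "allocate in
// the target space, copy the payload, and leave a forwarding pointer in the
// first word of the old copy", which is what the
// Memory::Address_at(src_addr) = dst_addr store above implements.
namespace {

struct SimpleEvacuator {
  static void Migrate(uintptr_t* dst, uintptr_t* src, int size_in_words) {
    // Copy the payload word by word.
    for (int i = 0; i < size_in_words; i++) dst[i] = src[i];
    // Leave a forwarding pointer so later visitors can update stale slots.
    src[0] = reinterpret_cast<uintptr_t>(dst);
  }
};

}  // namespace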
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001619
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001620class MarkCompactCollector::EvacuateNewSpaceVisitor final
1621 : public MarkCompactCollector::EvacuateVisitorBase {
1622 public:
1623 static const intptr_t kLabSize = 4 * KB;
1624 static const intptr_t kMaxLabObjectSize = 256;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001625
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001626 explicit EvacuateNewSpaceVisitor(Heap* heap,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001627 CompactionSpaceCollection* compaction_spaces,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001628 HashMap* local_pretenuring_feedback)
Ben Murdochda12d292016-06-02 14:46:10 +01001629 : EvacuateVisitorBase(heap, compaction_spaces),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001630 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1631 space_to_allocate_(NEW_SPACE),
1632 promoted_size_(0),
1633 semispace_copied_size_(0),
1634 local_pretenuring_feedback_(local_pretenuring_feedback) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001635
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001636 bool Visit(HeapObject* object) override {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001637 heap_->UpdateAllocationSite<Heap::kCached>(object,
1638 local_pretenuring_feedback_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001639 int size = object->Size();
1640 HeapObject* target_object = nullptr;
1641 if (heap_->ShouldBePromoted(object->address(), size) &&
Ben Murdoch097c5b22016-05-18 11:27:45 +01001642 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1643 &target_object)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001644 // If we end up needing more special cases, we should factor this out.
1645 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1646 heap_->array_buffer_tracker()->Promote(
1647 JSArrayBuffer::cast(target_object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001648 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001649 promoted_size_ += size;
1650 return true;
1651 }
1652 HeapObject* target = nullptr;
1653 AllocationSpace space = AllocateTargetObject(object, &target);
Ben Murdochda12d292016-06-02 14:46:10 +01001654 MigrateObject(HeapObject::cast(target), object, size, space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001655 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1656 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1657 }
1658 semispace_copied_size_ += size;
1659 return true;
1660 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001661
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001662 intptr_t promoted_size() { return promoted_size_; }
1663 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1664
1665 private:
1666 enum NewSpaceAllocationMode {
1667 kNonstickyBailoutOldSpace,
1668 kStickyBailoutOldSpace,
1669 };
1670
1671 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1672 HeapObject** target_object) {
1673 const int size = old_object->Size();
1674 AllocationAlignment alignment = old_object->RequiredAlignment();
1675 AllocationResult allocation;
1676 if (space_to_allocate_ == NEW_SPACE) {
1677 if (size > kMaxLabObjectSize) {
1678 allocation =
1679 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1680 } else {
1681 allocation = AllocateInLab(size, alignment);
1682 }
1683 }
1684 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1685 allocation = AllocateInOldSpace(size, alignment);
1686 }
1687 bool ok = allocation.To(target_object);
1688 DCHECK(ok);
1689 USE(ok);
1690 return space_to_allocate_;
1691 }
1692
1693 inline bool NewLocalAllocationBuffer() {
1694 AllocationResult result =
1695 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1696 LocalAllocationBuffer saved_old_buffer = buffer_;
1697 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1698 if (buffer_.IsValid()) {
1699 buffer_.TryMerge(&saved_old_buffer);
1700 return true;
1701 }
1702 return false;
1703 }
1704
1705 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1706 AllocationAlignment alignment,
1707 NewSpaceAllocationMode mode) {
1708 AllocationResult allocation =
1709 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1710 if (allocation.IsRetry()) {
1711 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1712 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1713 } else {
1714 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1715 alignment);
1716 if (allocation.IsRetry()) {
1717 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001718 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001719 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001720 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001721 return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001722 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001723
1724 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1725 AllocationAlignment alignment) {
1726 AllocationResult allocation =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001727 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1728 alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001729 if (allocation.IsRetry()) {
1730 FatalProcessOutOfMemory(
1731 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1732 }
1733 return allocation;
1734 }
1735
1736 inline AllocationResult AllocateInLab(int size_in_bytes,
1737 AllocationAlignment alignment) {
1738 AllocationResult allocation;
1739 if (!buffer_.IsValid()) {
1740 if (!NewLocalAllocationBuffer()) {
1741 space_to_allocate_ = OLD_SPACE;
1742 return AllocationResult::Retry(OLD_SPACE);
1743 }
1744 }
1745 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1746 if (allocation.IsRetry()) {
1747 if (!NewLocalAllocationBuffer()) {
1748 space_to_allocate_ = OLD_SPACE;
1749 return AllocationResult::Retry(OLD_SPACE);
1750 } else {
1751 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1752 if (allocation.IsRetry()) {
1753 space_to_allocate_ = OLD_SPACE;
1754 return AllocationResult::Retry(OLD_SPACE);
1755 }
1756 }
1757 }
1758 return allocation;
1759 }
1760
1761 LocalAllocationBuffer buffer_;
1762 AllocationSpace space_to_allocate_;
1763 intptr_t promoted_size_;
1764 intptr_t semispace_copied_size_;
1765 HashMap* local_pretenuring_feedback_;
1766};
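
// Illustrative sketch (not part of V8; hypothetical names): the allocation
// strategy above is a bump-pointer local allocation buffer (LAB). The
// visitor's AllocateInLab corresponds to: try the current buffer; on failure
// refill the buffer from the shared space; if even that fails, redirect all
// further allocation to old space.
namespace {

class SimpleLab {
 public:
  SimpleLab() : top_(0), limit_(0) {}

  // Bump-pointer allocation inside the current buffer; returns 0 on failure.
  uintptr_t Allocate(size_t size_in_bytes) {
    if (limit_ - top_ < size_in_bytes) return 0;
    uintptr_t result = top_;
    top_ += size_in_bytes;
    return result;
  }

  // Installs a freshly reserved chunk [start, start + size) as the buffer.
  void Refill(uintptr_t start, size_t size_in_bytes) {
    top_ = start;
    limit_ = start + size_in_bytes;
  }

 private:
  uintptr_t top_;
  uintptr_t limit_;
};

}  // namespace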
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001767
1768
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001769class MarkCompactCollector::EvacuateOldSpaceVisitor final
1770 : public MarkCompactCollector::EvacuateVisitorBase {
1771 public:
1772 EvacuateOldSpaceVisitor(Heap* heap,
Ben Murdochda12d292016-06-02 14:46:10 +01001773 CompactionSpaceCollection* compaction_spaces)
1774 : EvacuateVisitorBase(heap, compaction_spaces) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001775
1776 bool Visit(HeapObject* object) override {
1777 CompactionSpace* target_space = compaction_spaces_->Get(
1778 Page::FromAddress(object->address())->owner()->identity());
1779 HeapObject* target_object = nullptr;
1780 if (TryEvacuateObject(target_space, object, &target_object)) {
1781 DCHECK(object->map_word().IsForwardingAddress());
1782 return true;
1783 }
1784 return false;
1785 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001786};
1787
1788
1789void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001790 PageIterator it(space);
1791 while (it.has_next()) {
1792 Page* p = it.next();
Ben Murdochda12d292016-06-02 14:46:10 +01001793 if (!p->IsFlagSet(Page::BLACK_PAGE)) {
1794 DiscoverGreyObjectsOnPage(p);
1795 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001796 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001797 }
1798}
1799
1800
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001801void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1802 NewSpace* space = heap()->new_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001803 NewSpacePageIterator it(space->bottom(), space->top());
1804 while (it.has_next()) {
1805 NewSpacePage* page = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001806 DiscoverGreyObjectsOnPage(page);
1807 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001808 }
1809}
1810
1811
1812bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1813 Object* o = *p;
1814 if (!o->IsHeapObject()) return false;
1815 HeapObject* heap_object = HeapObject::cast(o);
1816 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001817 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001818}
1819
1820
1821bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1822 Object** p) {
1823 Object* o = *p;
1824 DCHECK(o->IsHeapObject());
1825 HeapObject* heap_object = HeapObject::cast(o);
1826 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001827 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001828}
1829
1830
1831void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1832 StringTable* string_table = heap()->string_table();
1833 // Mark the string table itself.
1834 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001835 if (Marking::IsWhite(string_table_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001836 // String table could have already been marked by visiting the handles list.
1837 SetMark(string_table, string_table_mark);
1838 }
1839 // Explicitly mark the prefix.
1840 string_table->IteratePrefix(visitor);
1841 ProcessMarkingDeque();
1842}
1843
1844
1845void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1846 MarkBit mark_bit = Marking::MarkBitFrom(site);
1847 SetMark(site, mark_bit);
1848}
1849
1850
1851void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1852 // Mark the heap roots including global variables, stack variables,
1853 // etc., and all objects reachable from them.
1854 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1855
1856 // Handle the string table specially.
1857 MarkStringTable(visitor);
1858
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001859 // There may be overflowed objects in the heap. Visit them now.
1860 while (marking_deque_.overflowed()) {
1861 RefillMarkingDeque();
1862 EmptyMarkingDeque();
1863 }
1864}
1865
1866
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001867void MarkCompactCollector::MarkImplicitRefGroups(
1868 MarkObjectFunction mark_object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001869 List<ImplicitRefGroup*>* ref_groups =
1870 isolate()->global_handles()->implicit_ref_groups();
1871
1872 int last = 0;
1873 for (int i = 0; i < ref_groups->length(); i++) {
1874 ImplicitRefGroup* entry = ref_groups->at(i);
1875 DCHECK(entry != NULL);
1876
1877 if (!IsMarked(*entry->parent)) {
1878 (*ref_groups)[last++] = entry;
1879 continue;
1880 }
1881
1882 Object*** children = entry->children;
1883 // A parent object is marked, so mark all child heap objects.
1884 for (size_t j = 0; j < entry->length; ++j) {
1885 if ((*children[j])->IsHeapObject()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001886 mark_object(heap(), HeapObject::cast(*children[j]));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001887 }
1888 }
1889
1890 // Once the entire group has been marked, dispose it because it's
1891 // not needed anymore.
1892 delete entry;
1893 }
1894 ref_groups->Rewind(last);
1895}
1896
1897
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001898// Mark all objects reachable from the objects on the marking stack.
1899// Before: the marking stack contains zero or more heap object pointers.
1900// After: the marking stack is empty, and all objects reachable from the
1901// marking stack have been marked, or are overflowed in the heap.
1902void MarkCompactCollector::EmptyMarkingDeque() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001903 Map* filler_map = heap_->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001904 while (!marking_deque_.IsEmpty()) {
1905 HeapObject* object = marking_deque_.Pop();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001906 // Explicitly skip one word fillers. Incremental markbit patterns are
1907 // correct only for objects that occupy at least two words.
1908 Map* map = object->map();
1909 if (map == filler_map) continue;
1910
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001911 DCHECK(object->IsHeapObject());
1912 DCHECK(heap()->Contains(object));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001913 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001914
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915 MarkBit map_mark = Marking::MarkBitFrom(map);
1916 MarkObject(map, map_mark);
1917
1918 MarkCompactMarkingVisitor::IterateBody(map, object);
1919 }
1920}
1921
1922
1923// Sweep the heap for overflowed objects, clear their overflow bits, and
1924// push them on the marking stack. Stop early if the marking stack fills
1925// before sweeping completes. If sweeping completes, there are no remaining
1926 // overflowed objects in the heap so the overflow flag on the marking stack
1927// is cleared.
1928void MarkCompactCollector::RefillMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001929 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 DCHECK(marking_deque_.overflowed());
1931
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001932 DiscoverGreyObjectsInNewSpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001933 if (marking_deque_.IsFull()) return;
1934
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001935 DiscoverGreyObjectsInSpace(heap()->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001936 if (marking_deque_.IsFull()) return;
1937
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001938 DiscoverGreyObjectsInSpace(heap()->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001939 if (marking_deque_.IsFull()) return;
1940
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001941 DiscoverGreyObjectsInSpace(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001942 if (marking_deque_.IsFull()) return;
1943
1944 LargeObjectIterator lo_it(heap()->lo_space());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001945 DiscoverGreyObjectsWithIterator(&lo_it);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001946 if (marking_deque_.IsFull()) return;
1947
1948 marking_deque_.ClearOverflowed();
1949}
1950
1951
1952// Mark all objects reachable (transitively) from objects on the marking
1953// stack. Before: the marking stack contains zero or more heap object
1954// pointers. After: the marking stack is empty and there are no overflowed
1955// objects in the heap.
1956void MarkCompactCollector::ProcessMarkingDeque() {
1957 EmptyMarkingDeque();
1958 while (marking_deque_.overflowed()) {
1959 RefillMarkingDeque();
1960 EmptyMarkingDeque();
1961 }
1962}
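
// Illustrative sketch (not part of V8; hypothetical names): the drain/refill
// protocol above is a bounded worklist with an overflow flag. A push that
// does not fit sets the flag and drops the object, which stays discoverable
// through its grey mark bit; the processing loop then alternates draining the
// worklist and rescanning the heap for grey objects until the flag stays
// clear.
namespace {

template <typename Object, int kCapacity>
class BoundedWorklist {
 public:
  BoundedWorklist() : size_(0), overflowed_(false) {}

  void Push(Object* object) {
    if (size_ == kCapacity) {
      overflowed_ = true;  // The object stays grey in the heap.
      return;
    }
    items_[size_++] = object;
  }

  // Returns nullptr when the worklist is empty.
  Object* Pop() { return size_ == 0 ? nullptr : items_[--size_]; }

  bool overflowed() const { return overflowed_; }
  void ClearOverflowed() { overflowed_ = false; }

 private:
  Object* items_[kCapacity];
  int size_;
  bool overflowed_;
};

}  // namespace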
1963
1964
1965// Mark all objects reachable (transitively) from objects on the marking
1966// stack including references only considered in the atomic marking pause.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001967void MarkCompactCollector::ProcessEphemeralMarking(
1968 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001969 bool work_to_do = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001970 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001971 while (work_to_do) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001972 if (!only_process_harmony_weak_collections) {
1973 isolate()->global_handles()->IterateObjectGroups(
1974 visitor, &IsUnmarkedHeapObjectWithHeap);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001975 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001976 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001977 ProcessWeakCollections();
1978 work_to_do = !marking_deque_.IsEmpty();
1979 ProcessMarkingDeque();
1980 }
1981}
1982
1983
1984void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1985 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1986 !it.done(); it.Advance()) {
1987 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1988 return;
1989 }
1990 if (it.frame()->type() == StackFrame::OPTIMIZED) {
1991 Code* code = it.frame()->LookupCode();
1992 if (!code->CanDeoptAt(it.frame()->pc())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001993 Code::BodyDescriptor::IterateBody(code, visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001994 }
1995 ProcessMarkingDeque();
1996 return;
1997 }
1998 }
1999}
2000
2001
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002002void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
2003 DCHECK(!marking_deque_.in_use());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002004 if (marking_deque_memory_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002005 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
2006 marking_deque_memory_committed_ = 0;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002007 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002008 if (marking_deque_memory_ == NULL) {
2009 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
2010 }
2011}
2012
2013
2014void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
2015 // Commit a backing store of up to max_size for the marking deque.
2016 // If committing that much fails, make do with a smaller one.
2017 CHECK(!marking_deque_.in_use());
2018 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
2019 base::VirtualMemory* memory = marking_deque_memory_;
2020 size_t currently_committed = marking_deque_memory_committed_;
2021
2022 if (currently_committed == size) return;
2023
2024 if (currently_committed > size) {
2025 bool success = marking_deque_memory_->Uncommit(
2026 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
2027 currently_committed - size);
2028 if (success) {
2029 marking_deque_memory_committed_ = size;
2030 return;
2031 }
2032 UNREACHABLE();
2033 }
2034
2035 bool success = memory->Commit(
2036 reinterpret_cast<Address>(memory->address()) + currently_committed,
2037 size - currently_committed,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002038 false); // Not executable.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002039 if (success) {
2040 marking_deque_memory_committed_ = size;
2041 return;
2042 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002043 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002044 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002045}
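
// Illustrative sketch (not part of V8; `try_commit` is a hypothetical
// stand-in for base::VirtualMemory::Commit): the loop above degrades
// gracefully by halving the requested size until a commit succeeds, giving up
// only below a minimum.
namespace {

// Returns the size that was successfully committed, or 0 if even `min_size`
// could not be committed. `min_size` must be non-zero.
template <typename TryCommit>
size_t CommitWithFallback(size_t max_size, size_t min_size,
                          TryCommit try_commit) {
  for (size_t size = max_size; size >= min_size; size >>= 1) {
    if (try_commit(size)) return size;
  }
  return 0;
}

}  // namespace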
2046
2047
2048void MarkCompactCollector::InitializeMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002049 DCHECK(!marking_deque_.in_use());
2050 DCHECK(marking_deque_memory_committed_ > 0);
2051 Address addr = static_cast<Address>(marking_deque_memory_->address());
2052 size_t size = marking_deque_memory_committed_;
2053 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2054 marking_deque_.Initialize(addr, addr + size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002055}
2056
2057
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002058void MarkingDeque::Initialize(Address low, Address high) {
2059 DCHECK(!in_use_);
2060 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2061 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2062 array_ = obj_low;
2063 mask_ = base::bits::RoundDownToPowerOfTwo32(
2064 static_cast<uint32_t>(obj_high - obj_low)) -
2065 1;
2066 top_ = bottom_ = 0;
2067 overflowed_ = false;
2068 in_use_ = true;
2069}
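
// Illustrative sketch (not part of V8): rounding the element count down to a
// power of two is what lets the deque wrap indices with a bitwise AND instead
// of a modulo; mask_ above is exactly that power of two minus one.
namespace {

struct RingIndex {
  // `capacity` must be a power of two (cf. RoundDownToPowerOfTwo32 above);
  // then (i & (capacity - 1)) == (i % capacity) for any unsigned i.
  static uint32_t Next(uint32_t i, uint32_t capacity) {
    return (i + 1) & (capacity - 1);
  }
};

}  // namespace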
2070
2071
2072void MarkingDeque::Uninitialize(bool aborting) {
2073 if (!aborting) {
2074 DCHECK(IsEmpty());
2075 DCHECK(!overflowed_);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002076 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002077 DCHECK(in_use_);
2078 top_ = bottom_ = 0xdecbad;
2079 in_use_ = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002080}
2081
2082
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002083void MarkCompactCollector::MarkLiveObjects() {
Ben Murdochda12d292016-06-02 14:46:10 +01002084 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002085 double start_time = 0.0;
2086 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002087 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002088 }
2089 // The recursive GC marker detects when it is nearing stack overflow,
2090 // and switches to a different marking system. JS interrupts interfere
2091 // with the C stack limit check.
2092 PostponeInterruptsScope postpone(isolate());
2093
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002094 {
Ben Murdochda12d292016-06-02 14:46:10 +01002095 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002096 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2097 if (was_marked_incrementally_) {
2098 incremental_marking->Finalize();
2099 } else {
2100 // Abort any pending incremental activities e.g. incremental sweeping.
2101 incremental_marking->Stop();
2102 if (marking_deque_.in_use()) {
2103 marking_deque_.Uninitialize(true);
2104 }
2105 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002106 }
2107
2108#ifdef DEBUG
2109 DCHECK(state_ == PREPARE_GC);
2110 state_ = MARK_LIVE_OBJECTS;
2111#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002112
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002113 EnsureMarkingDequeIsCommittedAndInitialize(
2114 MarkCompactCollector::kMaxMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002115
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002116 {
Ben Murdochda12d292016-06-02 14:46:10 +01002117 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002118 PrepareForCodeFlushing();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002119 }
2120
2121 RootMarkingVisitor root_visitor(heap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002122
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002123 {
Ben Murdochda12d292016-06-02 14:46:10 +01002124 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002125 MarkRoots(&root_visitor);
2126 ProcessTopOptimizedFrame(&root_visitor);
2127 }
2128
2129 {
Ben Murdochda12d292016-06-02 14:46:10 +01002130 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002131
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002132 // The objects reachable from the roots are marked, yet unreachable
2133 // objects are unmarked. Mark objects reachable due to host
2134 // application specific logic or through Harmony weak maps.
Ben Murdochda12d292016-06-02 14:46:10 +01002135 {
2136 TRACE_GC(heap()->tracer(),
2137 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2138 ProcessEphemeralMarking(&root_visitor, false);
2139 ProcessMarkingDeque();
2140 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002141
2142 // The objects reachable from the roots, weak maps or object groups
2143 // are marked. Objects pointed to only by weak global handles cannot be
2144 // immediately reclaimed. Instead, we have to mark them as pending and mark
2145 // objects reachable from them.
2146 //
2147 // First we identify nonlive weak handles and mark them as pending
2148 // destruction.
Ben Murdochda12d292016-06-02 14:46:10 +01002149 {
2150 TRACE_GC(heap()->tracer(),
2151 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2152 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2153 &IsUnmarkedHeapObject);
2154 ProcessMarkingDeque();
2155 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002156 // Then we mark the objects.
Ben Murdochda12d292016-06-02 14:46:10 +01002157
2158 {
2159 TRACE_GC(heap()->tracer(),
2160 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2161 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2162 ProcessMarkingDeque();
2163 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002164
2165    // Repeat Harmony weak map marking to mark unmarked objects reachable from
2166 // the weak roots we just marked as pending destruction.
2167 //
2168 // We only process harmony collections, as all object groups have been fully
2169    // processed and no weakly reachable node can discover new object groups.
Ben Murdochda12d292016-06-02 14:46:10 +01002170 {
2171 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2172 ProcessEphemeralMarking(&root_visitor, true);
2173 ProcessMarkingDeque();
2174 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002175 }
2176
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002177 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002178 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2179 start_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002180 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002181 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002182 if (FLAG_trace_gc_object_stats) {
2183 heap()->object_stats_->TraceObjectStats();
2184 }
2185 heap()->object_stats_->CheckpointObjectStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002186 }
2187}
2188
2189
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002190void MarkCompactCollector::ClearNonLiveReferences() {
Ben Murdochda12d292016-06-02 14:46:10 +01002191 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002192
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002193 {
Ben Murdochda12d292016-06-02 14:46:10 +01002194 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002195
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002196 // Prune the string table removing all strings only pointed to by the
2197 // string table. Cannot use string_table() here because the string
2198 // table is marked.
2199 StringTable* string_table = heap()->string_table();
Ben Murdochda12d292016-06-02 14:46:10 +01002200 InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002201 string_table->IterateElements(&internalized_visitor);
2202 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002203
Ben Murdochda12d292016-06-02 14:46:10 +01002204 ExternalStringTableCleaner external_visitor(heap(), nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002205 heap()->external_string_table_.Iterate(&external_visitor);
2206 heap()->external_string_table_.CleanUp();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002207 }
2208
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002209 {
Ben Murdochda12d292016-06-02 14:46:10 +01002210 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002211 // Process the weak references.
2212 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2213 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002214 }
2215
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216 {
Ben Murdochda12d292016-06-02 14:46:10 +01002217 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002218
2219 // Remove object groups after marking phase.
2220 heap()->isolate()->global_handles()->RemoveObjectGroups();
2221 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002222 }
2223
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002224 // Flush code from collected candidates.
2225 if (is_code_flushing_enabled()) {
Ben Murdochda12d292016-06-02 14:46:10 +01002226 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002227 code_flusher_->ProcessCandidates();
2228 }
2229
2230
2231 DependentCode* dependent_code_list;
2232 Object* non_live_map_list;
2233 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2234
2235 {
Ben Murdochda12d292016-06-02 14:46:10 +01002236 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002237 ClearSimpleMapTransitions(non_live_map_list);
2238 ClearFullMapTransitions();
2239 }
2240
2241 MarkDependentCodeForDeoptimization(dependent_code_list);
2242
2243 ClearWeakCollections();
2244
Ben Murdochda12d292016-06-02 14:46:10 +01002245 ClearInvalidRememberedSetSlots();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002246}
2247
2248
2249void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2250 DependentCode* list_head) {
Ben Murdochda12d292016-06-02 14:46:10 +01002251 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002252 Isolate* isolate = this->isolate();
2253 DependentCode* current = list_head;
2254 while (current->length() > 0) {
2255 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2256 isolate, DependentCode::kWeakCodeGroup);
2257 current = current->next_link();
2258 }
2259
2260 WeakHashTable* table = heap_->weak_object_to_code_table();
2261 uint32_t capacity = table->Capacity();
2262 for (uint32_t i = 0; i < capacity; i++) {
2263 uint32_t key_index = table->EntryToIndex(i);
2264 Object* key = table->get(key_index);
2265 if (!table->IsKey(key)) continue;
2266 uint32_t value_index = table->EntryToValueIndex(i);
2267 Object* value = table->get(value_index);
2268 DCHECK(key->IsWeakCell());
2269 if (WeakCell::cast(key)->cleared()) {
2270 have_code_to_deoptimize_ |=
2271 DependentCode::cast(value)->MarkCodeForDeoptimization(
2272 isolate, DependentCode::kWeakCodeGroup);
2273 table->set(key_index, heap_->the_hole_value());
2274 table->set(value_index, heap_->the_hole_value());
2275 table->ElementRemoved();
2276 }
2277 }
2278}
2279
2280
2281void MarkCompactCollector::ClearSimpleMapTransitions(
2282 Object* non_live_map_list) {
2283 Object* the_hole_value = heap()->the_hole_value();
2284 Object* weak_cell_obj = non_live_map_list;
2285 while (weak_cell_obj != Smi::FromInt(0)) {
2286 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2287 Map* map = Map::cast(weak_cell->value());
2288 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2289 Object* potential_parent = map->constructor_or_backpointer();
2290 if (potential_parent->IsMap()) {
2291 Map* parent = Map::cast(potential_parent);
2292 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2293 parent->raw_transitions() == weak_cell) {
2294 ClearSimpleMapTransition(parent, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002295 }
2296 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002297 weak_cell->clear();
2298 weak_cell_obj = weak_cell->next();
2299 weak_cell->clear_next(the_hole_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002300 }
2301}
2302
2303
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002304void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2305 Map* dead_transition) {
2306 // A previously existing simple transition (stored in a WeakCell) is going
2307 // to be cleared. Clear the useless cell pointer, and take ownership
2308 // of the descriptor array.
2309 map->set_raw_transitions(Smi::FromInt(0));
2310 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002311 DescriptorArray* descriptors = map->instance_descriptors();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002312 if (descriptors == dead_transition->instance_descriptors() &&
2313 number_of_own_descriptors > 0) {
2314 TrimDescriptorArray(map, descriptors);
2315 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2316 map->set_owns_descriptors(true);
2317 }
2318}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002319
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002320
2321void MarkCompactCollector::ClearFullMapTransitions() {
2322 HeapObject* undefined = heap()->undefined_value();
2323 Object* obj = heap()->encountered_transition_arrays();
2324 while (obj != Smi::FromInt(0)) {
2325 TransitionArray* array = TransitionArray::cast(obj);
2326 int num_transitions = array->number_of_entries();
2327 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2328 if (num_transitions > 0) {
2329 Map* map = array->GetTarget(0);
2330 Map* parent = Map::cast(map->constructor_or_backpointer());
2331 bool parent_is_alive =
2332 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2333 DescriptorArray* descriptors =
2334 parent_is_alive ? parent->instance_descriptors() : nullptr;
2335 bool descriptors_owner_died =
2336 CompactTransitionArray(parent, array, descriptors);
2337 if (descriptors_owner_died) {
2338 TrimDescriptorArray(parent, descriptors);
2339 }
2340 }
2341 obj = array->next_link();
2342 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2343 }
2344 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2345}
2346
2347
2348bool MarkCompactCollector::CompactTransitionArray(
2349 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2350 int num_transitions = transitions->number_of_entries();
2351 bool descriptors_owner_died = false;
2352 int transition_index = 0;
2353 // Compact all live transitions to the left.
2354 for (int i = 0; i < num_transitions; ++i) {
2355 Map* target = transitions->GetTarget(i);
2356 DCHECK_EQ(target->constructor_or_backpointer(), map);
2357 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2358 if (descriptors != nullptr &&
2359 target->instance_descriptors() == descriptors) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002360 descriptors_owner_died = true;
2361 }
2362 } else {
2363 if (i != transition_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002364 Name* key = transitions->GetKey(i);
2365 transitions->SetKey(transition_index, key);
2366 Object** key_slot = transitions->GetKeySlot(transition_index);
2367 RecordSlot(transitions, key_slot, key);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002368 // Target slots do not need to be recorded since maps are not compacted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002369 transitions->SetTarget(transition_index, transitions->GetTarget(i));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002370 }
2371 transition_index++;
2372 }
2373 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002374 // If there are no transitions to be cleared, return.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002375 if (transition_index == num_transitions) {
2376 DCHECK(!descriptors_owner_died);
2377 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002378 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002379 // Note that we never eliminate a transition array, though we might right-trim
2380 // such that number_of_transitions() == 0. If this assumption changes,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002381 // TransitionArray::Insert() will need to deal with the case that a transition
2382 // array disappeared during GC.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002383 int trim = TransitionArray::Capacity(transitions) - transition_index;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002384 if (trim > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002385 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2386 transitions, trim * TransitionArray::kTransitionSize);
2387 transitions->SetNumberOfTransitions(transition_index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002388 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002389 return descriptors_owner_died;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002390}
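
// Illustrative sketch (not part of V8; hypothetical names): the function
// above is an instance of the usual "compact live entries to the left, then
// right-trim the dead tail" pattern. Generically:
namespace {

// Moves all live entries of entries[0..length) to the front, preserving
// order, and returns the new length. The caller is expected to trim the tail
// (cf. RightTrimFixedArray above).
template <typename Entry, typename IsLive>
int CompactLiveEntries(Entry* entries, int length, IsLive is_live) {
  int live_index = 0;
  for (int i = 0; i < length; i++) {
    if (is_live(entries[i])) {
      if (i != live_index) entries[live_index] = entries[i];
      live_index++;
    }
  }
  return live_index;
}

}  // namespace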
2391
2392
2393void MarkCompactCollector::TrimDescriptorArray(Map* map,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002394 DescriptorArray* descriptors) {
2395 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2396 if (number_of_own_descriptors == 0) {
2397 DCHECK(descriptors == heap_->empty_descriptor_array());
2398 return;
2399 }
2400
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002401 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2402 int to_trim = number_of_descriptors - number_of_own_descriptors;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002403 if (to_trim > 0) {
2404 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2405 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2406 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002407
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002408 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2409 descriptors->Sort();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002410
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002411 if (FLAG_unbox_double_fields) {
2412 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2413 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2414 number_of_own_descriptors);
2415 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2416 }
2417 }
2418 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2419 map->set_owns_descriptors(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002420}
2421
2422
2423void MarkCompactCollector::TrimEnumCache(Map* map,
2424 DescriptorArray* descriptors) {
2425 int live_enum = map->EnumLength();
2426 if (live_enum == kInvalidEnumCacheSentinel) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002427 live_enum =
2428 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002429 }
2430 if (live_enum == 0) return descriptors->ClearEnumCache();
2431
2432 FixedArray* enum_cache = descriptors->GetEnumCache();
2433
2434 int to_trim = enum_cache->length() - live_enum;
2435 if (to_trim <= 0) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002436 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2437 descriptors->GetEnumCache(), to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002438
2439 if (!descriptors->HasEnumIndicesCache()) return;
2440 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002441 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2442 to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002443}
2444
2445
2446void MarkCompactCollector::ProcessWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002447 Object* weak_collection_obj = heap()->encountered_weak_collections();
2448 while (weak_collection_obj != Smi::FromInt(0)) {
2449 JSWeakCollection* weak_collection =
2450 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2451 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2452 if (weak_collection->table()->IsHashTable()) {
2453 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002454 for (int i = 0; i < table->Capacity(); i++) {
2455 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2456 Object** key_slot =
2457 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002458 RecordSlot(table, key_slot, *key_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002459 Object** value_slot =
2460 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002461 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002462 value_slot);
2463 }
2464 }
2465 }
2466 weak_collection_obj = weak_collection->next();
2467 }
2468}
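
// Illustrative sketch (not part of V8; hypothetical callbacks): a weak
// collection entry behaves like an ephemeron: its value is traced only if
// its key is already marked, and entries whose keys never get marked are
// removed afterwards (see ClearWeakCollections below). One marking pass over
// such a table looks like this; the collector repeats these passes until no
// new objects get marked.
namespace {

template <typename Object, typename IsMarked, typename Trace>
void ProcessEphemeronEntries(Object** keys, Object** values, int length,
                             IsMarked is_marked, Trace trace) {
  for (int i = 0; i < length; i++) {
    if (keys[i] != nullptr && is_marked(keys[i])) {
      trace(values[i]);  // The value is reachable through its live key.
    }
  }
}

}  // namespace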
2469
2470
2471void MarkCompactCollector::ClearWeakCollections() {
Ben Murdochda12d292016-06-02 14:46:10 +01002472 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002473 Object* weak_collection_obj = heap()->encountered_weak_collections();
2474 while (weak_collection_obj != Smi::FromInt(0)) {
2475 JSWeakCollection* weak_collection =
2476 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2477 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2478 if (weak_collection->table()->IsHashTable()) {
2479 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2480 for (int i = 0; i < table->Capacity(); i++) {
2481 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2482 if (!MarkCompactCollector::IsMarked(key)) {
2483 table->RemoveEntry(i);
2484 }
2485 }
2486 }
2487 weak_collection_obj = weak_collection->next();
2488 weak_collection->set_next(heap()->undefined_value());
2489 }
2490 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2491}
2492
2493
2494void MarkCompactCollector::AbortWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002495 Object* weak_collection_obj = heap()->encountered_weak_collections();
2496 while (weak_collection_obj != Smi::FromInt(0)) {
2497 JSWeakCollection* weak_collection =
2498 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2499 weak_collection_obj = weak_collection->next();
2500 weak_collection->set_next(heap()->undefined_value());
2501 }
2502 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2503}
2504
2505
void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
                                          DependentCode** dependent_code_list) {
  Heap* heap = this->heap();
  TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
  Object* weak_cell_obj = heap->encountered_weak_cells();
  Object* the_hole_value = heap->the_hole_value();
  DependentCode* dependent_code_head =
      DependentCode::cast(heap->empty_fixed_array());
  Object* non_live_map_head = Smi::FromInt(0);
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    Object* next_weak_cell = weak_cell->next();
    bool clear_value = true;
    bool clear_next = true;
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    if (!MarkCompactCollector::IsMarked(value)) {
      // Cells for new-space objects embedded in optimized code are wrapped in
      // WeakCell and put into Heap::weak_object_to_code_table.
      // Such cells do not have any strong references but we want to keep them
      // alive as long as the cell value is alive.
      // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
      if (value->IsCell()) {
        Object* cell_value = Cell::cast(value)->value();
        if (cell_value->IsHeapObject() &&
            MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
          // Resurrect the cell.
          MarkBit mark = Marking::MarkBitFrom(value);
          SetMark(value, mark);
          Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
          RecordSlot(value, slot, *slot);
          slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
          RecordSlot(weak_cell, slot, *slot);
          clear_value = false;
        }
      }
      if (value->IsMap()) {
        // The map is non-live.
        Map* map = Map::cast(value);
        // Add dependent code to the dependent_code_list.
        DependentCode* candidate = map->dependent_code();
        // We rely on the fact that the weak code group comes first.
        STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
        if (candidate->length() > 0 &&
            candidate->group() == DependentCode::kWeakCodeGroup) {
          candidate->set_next_link(dependent_code_head);
          dependent_code_head = candidate;
        }
        // Add the weak cell to the non_live_map list.
        weak_cell->set_next(non_live_map_head);
        non_live_map_head = weak_cell;
        clear_value = false;
        clear_next = false;
      }
    } else {
      // The value of the weak cell is alive.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      RecordSlot(weak_cell, slot, *slot);
      clear_value = false;
    }
    if (clear_value) {
      weak_cell->clear();
    }
    if (clear_next) {
      weak_cell->clear_next(the_hole_value);
    }
    weak_cell_obj = next_weak_cell;
  }
  heap->set_encountered_weak_cells(Smi::FromInt(0));
  *non_live_map_list = non_live_map_head;
  *dependent_code_list = dependent_code_head;
}


void MarkCompactCollector::AbortWeakCells() {
  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    weak_cell_obj = weak_cell->next();
    weak_cell->clear_next(the_hole_value);
  }
  heap()->set_encountered_weak_cells(Smi::FromInt(0));
}


void MarkCompactCollector::AbortTransitionArrays() {
  HeapObject* undefined = heap()->undefined_value();
  Object* obj = heap()->encountered_transition_arrays();
  while (obj != Smi::FromInt(0)) {
    TransitionArray* array = TransitionArray::cast(obj);
    obj = array->next_link();
    array->set_next_link(undefined, SKIP_WRITE_BARRIER);
  }
  heap()->set_encountered_transition_arrays(Smi::FromInt(0));
}

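// Maps a RelocInfo mode to the typed-slot kind that is stored in the
// OLD_TO_OLD remembered set, so that the slot can be revisited and updated
// after evacuation.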
static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return CODE_TARGET_SLOT;
  } else if (RelocInfo::IsCell(rmode)) {
    return CELL_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return DEBUG_TARGET_SLOT;
  }
  UNREACHABLE();
  return NUMBER_OF_SLOT_TYPES;
}

void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
                                           Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  RelocInfo::Mode rmode = rinfo->rmode();
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    Address addr = rinfo->pc();
    SlotType slot_type = SlotTypeForRMode(rmode);
    if (rinfo->IsInConstantPool()) {
      addr = rinfo->constant_pool_entry_address();
      if (RelocInfo::IsCodeTarget(rmode)) {
        slot_type = CODE_ENTRY_SLOT;
      } else {
        DCHECK(RelocInfo::IsEmbeddedObject(rmode));
        slot_type = OBJECT_SLOT;
      }
    }
    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
  }
}

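// Revisits a single typed slot after evacuation: the recorded address is
// wrapped back into a RelocInfo (or visited directly for code-entry and raw
// object slots) and handed to the given visitor so that it can rewrite the
// embedded pointer.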
static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
                                   SlotType slot_type, Address addr) {
  switch (slot_type) {
    case CODE_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    case CELL_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    case CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
    case RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::BodyDescriptor::IterateBody(obj, v);
      break;
    }
    case DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
                      NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
      break;
    }
    case EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(isolate, v);
      break;
    }
    case OBJECT_SLOT: {
      v->VisitPointer(reinterpret_cast<Object**>(addr));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor : public ObjectVisitor {
 public:
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) override { UpdatePointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCell(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    Object* cell = rinfo->target_cell();
    Object* old_cell = cell;
    VisitPointer(&cell);
    if (cell != old_cell) {
      rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    Object* old_target = target;
    VisitPointer(&target);
    // Avoid unnecessary changes that might unnecessarily flush the instruction
    // cache.
    if (target != old_target) {
      rinfo->set_target_object(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) override {
    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    Object* old_target = target;
    VisitPointer(&target);
    if (target != old_target) {
      rinfo->set_target_address(Code::cast(target)->instruction_start());
    }
  }

  void VisitCodeAgeSequence(RelocInfo* rinfo) override {
    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    Object* stub = rinfo->code_age_stub();
    DCHECK(stub != NULL);
    VisitPointer(&stub);
    if (stub != rinfo->code_age_stub()) {
      rinfo->set_code_age_stub(Code::cast(stub));
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) override {
    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
           rinfo->IsPatchedDebugBreakSlotSequence());
    Object* target =
        Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
    VisitPointer(&target);
    rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
  }

  static inline void UpdateSlot(Heap* heap, Object** slot) {
    Object* obj = reinterpret_cast<Object*>(
        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));

    if (!obj->IsHeapObject()) return;

    HeapObject* heap_obj = HeapObject::cast(obj);

    MapWord map_word = heap_obj->map_word();
    if (map_word.IsForwardingAddress()) {
      DCHECK(heap->InFromSpace(heap_obj) ||
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
             Page::FromAddress(heap_obj->address())
                 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
      HeapObject* target = map_word.ToForwardingAddress();
      base::NoBarrier_CompareAndSwap(
          reinterpret_cast<base::AtomicWord*>(slot),
          reinterpret_cast<base::AtomicWord>(obj),
          reinterpret_cast<base::AtomicWord>(target));
      DCHECK(!heap->InFromSpace(target) &&
             !MarkCompactCollector::IsOnEvacuationCandidate(target));
    }
  }

 private:
  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }

  Heap* heap_;
};

static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         Object** p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    return String::cast(map_word.ToForwardingAddress());
  }

  return String::cast(*p);
}

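// Determines whether a recorded slot address lies inside an object that is
// marked black on the given chunk, by walking the mark bitmap backwards from
// the slot. Slots that point at the first word of an object are treated as
// invalid, since left-trimming can move an object header onto such a slot.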
bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
  Space* owner = p->owner();
  DCHECK(owner != heap_->lo_space() && owner != nullptr);
  USE(owner);

  // If we are on a black page, we cannot find the actual object start easily,
  // so we conservatively treat the slot as being inside a live object.
  if (p->IsFlagSet(Page::BLACK_PAGE)) {
    return true;
  }

  uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
  unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
  MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
  MarkBit::CellType* cells = p->markbits()->cells();
  Address base_address = p->area_start();
  unsigned int base_address_cell_index = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));

  // Check if the slot points to the start of an object. This can happen e.g.
  // when we left trim a fixed array. Such slots are invalid and we can remove
  // them.
  if (index_mask > 1) {
    if ((cells[cell_index] & index_mask) != 0 &&
        (cells[cell_index] & (index_mask >> 1)) == 0) {
      return false;
    }
  } else {
    // Left trimming moves the mark bits so we cannot be in the very first cell.
    DCHECK(cell_index != base_address_cell_index);
    if ((cells[cell_index] & index_mask) != 0 &&
        (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
      return false;
    }
  }

  // Check if the object is in the current cell.
  MarkBit::CellType slot_mask;
  if ((cells[cell_index] == 0) ||
      (base::bits::CountTrailingZeros32(cells[cell_index]) >
       base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
    // If we are already in the first cell, there is no live object.
    if (cell_index == base_address_cell_index) return false;

    // If not, find a cell in a preceding cell slot that has a mark bit set.
    do {
      cell_index--;
    } while (cell_index > base_address_cell_index && cells[cell_index] == 0);

    // The slot must be in a dead object if there are no preceding cells that
    // have mark bits set.
    if (cells[cell_index] == 0) {
      return false;
    }

    // The object is in a preceding cell. Set the mask to find any object.
    slot_mask = ~0u;
  } else {
    // We are interested in object mark bits right before the slot.
    slot_mask = index_mask + (index_mask - 1);
  }

  MarkBit::CellType current_cell = cells[cell_index];
  CHECK(current_cell != 0);

  // Find the last live object in the cell.
  unsigned int leading_zeros =
      base::bits::CountLeadingZeros32(current_cell & slot_mask);
  CHECK(leading_zeros != Bitmap::kBitsPerCell);
  int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;

  base_address += (cell_index - base_address_cell_index) *
                  Bitmap::kBitsPerCell * kPointerSize;
  Address address = base_address + offset * kPointerSize;
  HeapObject* object = HeapObject::FromAddress(address);
  CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
  CHECK(object->address() < reinterpret_cast<Address>(slot));
  if ((object->address() + kPointerSize) <= slot &&
      (object->address() + object->Size()) > slot) {
    // If the slot is within the last found object in the cell, the slot is
    // in a live object.
    // Slots pointing to the first word of an object are invalid and removed.
    // This can happen when we move the object header while left trimming.
    return true;
  }
  return false;
}

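// Slow path that returns the black object containing the given slot address,
// or nullptr if the slot is not inside a live object. Large-object pages are
// looked up through the large object space, black pages are scanned with a
// plain HeapObjectIterator, and ordinary pages use the live-object iterator.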
HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
  Page* p = Page::FromAddress(slot);
  Space* owner = p->owner();
  if (owner == heap_->lo_space() || owner == nullptr) {
    Object* large_object = heap_->lo_space()->FindObject(slot);
    // This object has to exist, otherwise we would not have recorded a slot
    // for it.
    CHECK(large_object->IsHeapObject());
    HeapObject* large_heap_object = HeapObject::cast(large_object);

    if (IsMarked(large_heap_object)) {
      return large_heap_object;
    }
    return nullptr;
  }

  if (p->IsFlagSet(Page::BLACK_PAGE)) {
    HeapObjectIterator it(p);
    HeapObject* object = nullptr;
    while ((object = it.Next()) != nullptr) {
      int size = object->Size();
      if (object->address() > slot) return nullptr;
      if (object->address() <= slot && slot < (object->address() + size)) {
        return object;
      }
    }
  } else {
    LiveObjectIterator<kBlackObjects> it(p);
    HeapObject* object = nullptr;
    while ((object = it.Next()) != nullptr) {
      int size = object->Size();
      if (object->address() > slot) return nullptr;
      if (object->address() <= slot && slot < (object->address() + size)) {
        return object;
      }
    }
  }
  return nullptr;
}


void MarkCompactCollector::EvacuateNewSpacePrologue() {
  NewSpace* new_space = heap()->new_space();
  NewSpacePageIterator it(new_space->bottom(), new_space->top());
  // Append the list of new space pages to be processed.
  while (it.has_next()) {
    newspace_evacuation_candidates_.Add(it.next());
  }
  new_space->Flip();
  new_space->ResetAllocationInfo();
}

void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
  newspace_evacuation_candidates_.Rewind(0);
}

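// Per-task evacuation state. Each evacuator owns its own compaction spaces,
// pretenuring feedback and space visitors, so that each parallel task works
// on its own copies; the results are merged back on the main thread in
// Finalize().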
class MarkCompactCollector::Evacuator : public Malloced {
 public:
  explicit Evacuator(MarkCompactCollector* collector)
      : collector_(collector),
        compaction_spaces_(collector->heap()),
        local_pretenuring_feedback_(HashMap::PointersMatch,
                                    kInitialLocalPretenuringFeedbackCapacity),
        new_space_visitor_(collector->heap(), &compaction_spaces_,
                           &local_pretenuring_feedback_),
        old_space_visitor_(collector->heap(), &compaction_spaces_),
        duration_(0.0),
        bytes_compacted_(0) {}

  inline bool EvacuatePage(MemoryChunk* chunk);

  // Merge back locally cached info sequentially. Note that this method needs
  // to be called from the main thread.
  inline void Finalize();

  CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }

 private:
  static const int kInitialLocalPretenuringFeedbackCapacity = 256;

  Heap* heap() { return collector_->heap(); }

  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    duration_ += duration;
    bytes_compacted_ += bytes_compacted;
  }

  inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);

  MarkCompactCollector* collector_;

  // Locally cached collector data.
  CompactionSpaceCollection compaction_spaces_;
  HashMap local_pretenuring_feedback_;

  // Visitors for the corresponding spaces.
  EvacuateNewSpaceVisitor new_space_visitor_;
  EvacuateOldSpaceVisitor old_space_visitor_;

  // Book keeping info.
  double duration_;
  intptr_t bytes_compacted_;
};

bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
    MemoryChunk* p, HeapObjectVisitor* visitor) {
  bool success = false;
  DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
  int saved_live_bytes = p->LiveBytes();
  double evacuation_time;
  {
    AlwaysAllocateScope always_allocate(heap()->isolate());
    TimedScope timed_scope(&evacuation_time);
    success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
  }
  if (FLAG_trace_evacuation) {
    PrintIsolate(heap()->isolate(),
                 "evacuation[%p]: page=%p new_space=%d executable=%d "
                 "live_bytes=%d time=%f\n",
                 this, p, p->InNewSpace(),
                 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
                 evacuation_time);
  }
  if (success) {
    ReportCompactionProgress(evacuation_time, saved_live_bytes);
  }
  return success;
}

bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
  bool success = false;
  if (chunk->InNewSpace()) {
    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
              NewSpacePage::kSweepingDone);
    success = EvacuateSinglePage(chunk, &new_space_visitor_);
    DCHECK(success);
    USE(success);
  } else {
    DCHECK(chunk->IsEvacuationCandidate());
    DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
    success = EvacuateSinglePage(chunk, &old_space_visitor_);
  }
  return success;
}

void MarkCompactCollector::Evacuator::Finalize() {
  heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
  heap()->code_space()->MergeCompactionSpace(
      compaction_spaces_.Get(CODE_SPACE));
  heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
  heap()->IncrementSemiSpaceCopiedObjectSize(
      new_space_visitor_.semispace_copied_size());
  heap()->IncrementYoungSurvivorsCounter(
      new_space_visitor_.promoted_size() +
      new_space_visitor_.semispace_copied_size());
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}

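// Chooses how many parallel compaction tasks to spawn. With a profiled
// compaction speed the task count is derived from the marked live bytes and
// a ~1ms target compaction time; without profiling data it falls back to one
// task per page. The result is capped by the number of evacuation pages and
// by the available background threads minus the sweeper tasks.
// Example (assumed numbers, for illustration only): 8 MB of live bytes at a
// measured speed of 4 MB/ms gives 1 + 8/4/1 = 3 tasks before the page and
// core caps are applied.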
int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
                                                          intptr_t live_bytes) {
  if (!FLAG_parallel_compaction) return 1;
  // Compute the number of needed tasks based on a target compaction time, the
  // profiled compaction speed and marked live memory.
  //
  // The number of parallel compaction tasks is limited by:
  // - #evacuation pages
  // - (#cores - 1)
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;

  double compaction_speed =
      heap()->tracer()->CompactionSpeedInBytesPerMillisecond();

  const int available_cores = Max(
      1, static_cast<int>(
             V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
             kNumSweepingTasks - 1);
  int tasks;
  if (compaction_speed > 0) {
    tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
                                 kTargetCompactionTimeInMs);
  } else {
    tasks = pages;
  }
  const int tasks_capped_pages = Min(pages, tasks);
  return Min(available_cores, tasks_capped_pages);
}

class EvacuationJobTraits {
 public:
  typedef int* PerPageData;  // Pointer to number of aborted pages.
  typedef MarkCompactCollector::Evacuator* PerTaskData;

  static const bool NeedSequentialFinalization = true;

  static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
                                    MemoryChunk* chunk, PerPageData) {
    return evacuator->EvacuatePage(chunk);
  }

  static void FinalizePageSequentially(Heap*, MemoryChunk* chunk, bool success,
                                       PerPageData data) {
    if (chunk->InNewSpace()) {
      DCHECK(success);
    } else {
      Page* p = static_cast<Page*>(chunk);
      if (success) {
        DCHECK(p->IsEvacuationCandidate());
        DCHECK(p->SweepingDone());
        p->Unlink();
      } else {
        // We have partially compacted the page, i.e., some objects may have
        // moved, others are still in place.
        // We need to:
        // - Leave the evacuation candidate flag for later processing of slots
        //   buffer entries.
        // - Leave the slots buffer there for processing of entries added by
        //   the write barrier.
        // - Rescan the page as slot recording in the migration buffer only
        //   happens upon moving (which we potentially didn't do).
        // - Leave the page in the list of pages of a space since we could not
        //   fully evacuate it.
        DCHECK(p->IsEvacuationCandidate());
        p->SetFlag(Page::COMPACTION_WAS_ABORTED);
        *data += 1;
      }
    }
  }
};

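// Evacuates all old-space evacuation candidates and all new-space pages in a
// single PageParallelJob. Each task runs one Evacuator; old-space pages that
// could not be fully evacuated are counted through the per-page data pointer
// and handled later by UpdatePointersAfterEvacuation() and SweepAbortedPages().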
void MarkCompactCollector::EvacuatePagesInParallel() {
  PageParallelJob<EvacuationJobTraits> job(
      heap_, heap_->isolate()->cancelable_task_manager());

  int abandoned_pages = 0;
  intptr_t live_bytes = 0;
  for (Page* page : evacuation_candidates_) {
    live_bytes += page->LiveBytes();
    job.AddPage(page, &abandoned_pages);
  }
  for (NewSpacePage* page : newspace_evacuation_candidates_) {
    live_bytes += page->LiveBytes();
    job.AddPage(page, &abandoned_pages);
  }
  DCHECK_GE(job.NumberOfPages(), 1);

  // Used for trace summary.
  double compaction_speed = 0;
  if (FLAG_trace_evacuation) {
    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
  }

  const int wanted_num_tasks =
      NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i] = new Evacuator(this);
  }
  job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i]->Finalize();
    delete evacuators[i];
  }
  delete[] evacuators;

  if (FLAG_trace_evacuation) {
    PrintIsolate(
        isolate(),
        "%8.0f ms: evacuation-summary: parallel=%s pages=%d aborted=%d "
        "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
        "d compaction_speed=%.f\n",
        isolate()->time_millis_since_init(),
        FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
        abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
        V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
        live_bytes, compaction_speed);
  }
}

class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};

enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };

enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };

enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

// Sweeps a page. After sweeping the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
// Returns the size of the biggest continuous freed memory chunk in bytes.
template <SweepingMode sweeping_mode,
          MarkCompactCollector::SweepingParallelism parallelism,
          SkipListRebuildingMode skip_list_mode,
          FreeSpaceTreatmentMode free_space_mode>
static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) {
  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
  DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
            space->identity() == CODE_SPACE);
  DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
  DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
         sweeping_mode == SWEEP_ONLY);

  Address free_start = p->area_start();
  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);

  // If we use the skip list for code space pages, we have to lock the skip
  // list because it could be accessed concurrently by the runtime or the
  // deoptimizer.
  SkipList* skip_list = p->skip_list();
  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
    skip_list->Clear();
  }

  intptr_t freed_bytes = 0;
  intptr_t max_freed_bytes = 0;
  int curr_region = -1;

  LiveObjectIterator<kBlackObjects> it(p);
  HeapObject* object = NULL;
  while ((object = it.Next()) != NULL) {
    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    Address free_end = object->address();
    if (free_end != free_start) {
      int size = static_cast<int>(free_end - free_start);
      if (free_space_mode == ZAP_FREE_SPACE) {
        memset(free_start, 0xcc, size);
      }
      freed_bytes = space->UnaccountedFree(free_start, size);
      max_freed_bytes = Max(freed_bytes, max_freed_bytes);
    }
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
      object->IterateBody(map->instance_type(), size, v);
    }
    if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
      int new_region_start = SkipList::RegionNumber(free_end);
      int new_region_end =
          SkipList::RegionNumber(free_end + size - kPointerSize);
      if (new_region_start != curr_region || new_region_end != curr_region) {
        skip_list->AddObject(free_end, size);
        curr_region = new_region_end;
      }
    }
    free_start = free_end + size;
  }

  // Clear the mark bits of that page and reset live bytes count.
  Bitmap::Clear(p);

  if (free_start != p->area_end()) {
    int size = static_cast<int>(p->area_end() - free_start);
    if (free_space_mode == ZAP_FREE_SPACE) {
      memset(free_start, 0xcc, size);
    }
    freed_bytes = space->UnaccountedFree(free_start, size);
    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
  }
  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
}


void MarkCompactCollector::InvalidateCode(Code* code) {
  if (heap_->incremental_marking()->IsCompacting() &&
      !ShouldSkipEvacuationSlotRecording(code)) {
    DCHECK(compacting_);

    // If the object is white then no slots were recorded on it yet.
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    // Ignore all slots that might have been recorded in the body of the
    // deoptimized code object. Assumption: no slots will be recorded for
    // this object after invalidating it.
    Page* page = Page::FromAddress(code->address());
    Address start = code->instruction_start();
    Address end = code->address() + code->Size();
    RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
  }
}


// Return true if the given code is deoptimized or will be deoptimized.
bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
  return code->is_optimized_code() && code->marked_for_deoptimization();
}


#ifdef VERIFY_HEAP
static void VerifyAllBlackObjects(MemoryChunk* page) {
  LiveObjectIterator<kAllLiveObjects> it(page);
  HeapObject* object = NULL;
  while ((object = it.Next()) != NULL) {
    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
  }
}
#endif  // VERIFY_HEAP


bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
                                            HeapObjectVisitor* visitor,
                                            IterationMode mode) {
#ifdef VERIFY_HEAP
  VerifyAllBlackObjects(page);
#endif  // VERIFY_HEAP

  LiveObjectIterator<kBlackObjects> it(page);
  HeapObject* object = nullptr;
  while ((object = it.Next()) != nullptr) {
    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    if (!visitor->Visit(object)) {
      if (mode == kClearMarkbits) {
        page->markbits()->ClearRange(
            page->AddressToMarkbitIndex(page->area_start()),
            page->AddressToMarkbitIndex(object->address()));
        if (page->old_to_new_slots() != nullptr) {
          page->old_to_new_slots()->RemoveRange(
              0, static_cast<int>(object->address() - page->address()));
        }
        RecomputeLiveBytes(page);
      }
      return false;
    }
  }
  if (mode == kClearMarkbits) {
    Bitmap::Clear(page);
  }
  return true;
}


void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
  LiveObjectIterator<kBlackObjects> it(page);
  int new_live_size = 0;
  HeapObject* object = nullptr;
  while ((object = it.Next()) != nullptr) {
    new_live_size += object->Size();
  }
  page->SetLiveBytes(new_live_size);
}


void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
                                                ObjectVisitor* visitor) {
#ifdef VERIFY_HEAP
  VerifyAllBlackObjects(page);
#endif  // VERIFY_HEAP

  LiveObjectIterator<kBlackObjects> it(page);
  HeapObject* object = NULL;
  while ((object = it.Next()) != NULL) {
    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    Map* map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    object->IterateBody(map->instance_type(), size, visitor);
  }
}


void MarkCompactCollector::SweepAbortedPages() {
  // Second pass on aborted pages.
  for (Page* p : evacuation_candidates_) {
    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
      p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      PagedSpace* space = static_cast<PagedSpace*>(p->owner());
      switch (space->identity()) {
        case OLD_SPACE:
          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, p, nullptr);
          break;
        case CODE_SPACE:
          if (FLAG_zap_code_space) {
            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                  ZAP_FREE_SPACE>(space, p, nullptr);
          } else {
            Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
                  IGNORE_FREE_SPACE>(space, p, nullptr);
          }
          break;
        default:
          UNREACHABLE();
          break;
      }
      {
        base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
        swept_pages(space->identity())->Add(p);
      }
    }
  }
}


void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
  Heap::RelocationLock relocation_lock(heap());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    EvacuationScope evacuation_scope(this);

    EvacuateNewSpacePrologue();
    EvacuatePagesInParallel();
    EvacuateNewSpaceEpilogue();
    heap()->new_space()->set_age_mark(heap()->new_space()->top());
  }

  UpdatePointersAfterEvacuation();

  // Give pages that are queued to be freed back to the OS. Note that filtering
  // slots only handles old space (for unboxed doubles), and thus map space can
  // still contain stale pointers. We only free the chunks after pointer updates
  // to still have access to page headers.
  heap()->FreeQueuedChunks();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
    // After updating all pointers, we can finally sweep the aborted pages,
    // effectively overriding any forward pointers.
    SweepAbortedPages();

    // EvacuateNewSpaceAndCandidates iterates over new space objects and for
    // ArrayBuffers either re-registers them as live or promotes them. This is
    // needed to properly free them.
    heap()->array_buffer_tracker()->FreeDead(false);

    // Deallocate evacuated candidate pages.
    ReleaseEvacuationCandidates();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !sweeping_in_progress_) {
    VerifyEvacuation(heap());
  }
#endif
}

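// Job traits for updating recorded slots in parallel. OLD_TO_NEW slots are
// plain untyped addresses taken from the store buffer, while OLD_TO_OLD pages
// additionally carry typed slots inside code objects, which are decoded
// through UpdateTypedSlot().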
template <PointerDirection direction>
class PointerUpdateJobTraits {
 public:
  typedef int PerPageData;  // Per page data is not used in this job.
  typedef PointersUpdatingVisitor* PerTaskData;

  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
                                    MemoryChunk* chunk, PerPageData) {
    UpdateUntypedPointers(heap, chunk);
    UpdateTypedPointers(heap, chunk, visitor);
    return true;
  }
  static const bool NeedSequentialFinalization = false;
  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
  }

 private:
  static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
    if (direction == OLD_TO_NEW) {
      RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
                                                    UpdateOldToNewSlot);
    } else {
      RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
        PointersUpdatingVisitor::UpdateSlot(heap,
                                            reinterpret_cast<Object**>(slot));
        return REMOVE_SLOT;
      });
    }
  }

  static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
                                  PointersUpdatingVisitor* visitor) {
    if (direction == OLD_TO_OLD) {
      Isolate* isolate = heap->isolate();
      RememberedSet<OLD_TO_OLD>::IterateTyped(
          chunk, [isolate, visitor](SlotType type, Address slot) {
            UpdateTypedSlot(isolate, visitor, type, slot);
            return REMOVE_SLOT;
          });
    }
  }

  static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
    MapWord map_word = object->map_word();
    // Since we only filter invalid slots in old space, the store buffer can
    // still contain stale pointers in large object and in map spaces. Ignore
    // these pointers here.
    DCHECK(map_word.IsForwardingAddress() ||
           !object->GetHeap()->old_space()->Contains(
               reinterpret_cast<Address>(address)));
    if (map_word.IsForwardingAddress()) {
      // Update the corresponding slot.
      *address = map_word.ToForwardingAddress();
    }
  }
};

int NumberOfPointerUpdateTasks(int pages) {
  if (!FLAG_parallel_pointer_update) return 1;
  const int kMaxTasks = 4;
  const int kPagesPerTask = 4;
  return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
}

template <PointerDirection direction>
void UpdatePointersInParallel(Heap* heap) {
  PageParallelJob<PointerUpdateJobTraits<direction> > job(
      heap, heap->isolate()->cancelable_task_manager());
  RememberedSet<direction>::IterateMemoryChunks(
      heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
  PointersUpdatingVisitor visitor(heap);
  int num_pages = job.NumberOfPages();
  int num_tasks = NumberOfPointerUpdateTasks(num_pages);
  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}

class ToSpacePointerUpdateJobTraits {
 public:
  typedef std::pair<Address, Address> PerPageData;
  typedef PointersUpdatingVisitor* PerTaskData;

  static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
                                    MemoryChunk* chunk, PerPageData limits) {
    for (Address cur = limits.first; cur < limits.second;) {
      HeapObject* object = HeapObject::FromAddress(cur);
      Map* map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBody(map->instance_type(), size, visitor);
      cur += size;
    }
    return true;
  }
  static const bool NeedSequentialFinalization = false;
  static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
  }
};

void UpdateToSpacePointersInParallel(Heap* heap) {
  PageParallelJob<ToSpacePointerUpdateJobTraits> job(
      heap, heap->isolate()->cancelable_task_manager());
  Address space_start = heap->new_space()->bottom();
  Address space_end = heap->new_space()->top();
  NewSpacePageIterator it(space_start, space_end);
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address start =
        page->Contains(space_start) ? space_start : page->area_start();
    Address end = page->Contains(space_end) ? space_end : page->area_end();
    job.AddPage(page, std::make_pair(start, end));
  }
  PointersUpdatingVisitor visitor(heap);
  int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
  job.Run(num_tasks, [&visitor](int i) { return &visitor; });
}

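// Updates all pointers after evacuation, in phases: to-space objects, roots
// and OLD_TO_NEW slots first, then OLD_TO_OLD slots, then live objects on
// aborted compaction pages, and finally weak references such as the external
// string table and the weak list roots.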
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor(heap());

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
    UpdateToSpacePointersInParallel(heap_);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    UpdatePointersInParallel<OLD_TO_NEW>(heap_);
  }

  {
    Heap* heap = this->heap();
    TRACE_GC(heap->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
    UpdatePointersInParallel<OLD_TO_OLD>(heap_);
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (Page* p : evacuation_candidates_) {
      DCHECK(p->IsEvacuationCandidate());
      // Important: skip list should be cleared only after roots were updated
      // because root iteration traverses the stack and might have to find
      // code objects from non-updated pc pointing into evacuation candidate.
      SkipList* list = p->skip_list();
      if (list != NULL) list->Clear();

      // First pass on aborted pages, fixing up all live objects.
      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
        p->ClearEvacuationCandidate();
        VisitLiveObjectsBody(p, &updating_visitor);
      }
    }
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    // Update pointers from external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  for (Page* p : evacuation_candidates_) {
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    p->ResetLiveBytes();
    CHECK(p->SweepingDone());
    space->ReleasePage(p);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->FreeQueuedChunks();
}


int MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                          int required_freed_bytes,
                                          int max_pages) {
  int max_freed = 0;
  int max_freed_overall = 0;
  int page_count = 0;
  for (Page* p : sweeping_list(space)) {
    max_freed = SweepInParallel(p, space);
    DCHECK(max_freed >= 0);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;
    }
    max_freed_overall = Max(max_freed, max_freed_overall);
    page_count++;
    if (max_pages > 0 && page_count >= max_pages) {
      break;
    }
  }
  return max_freed_overall;
}


int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
  int max_freed = 0;
  if (page->mutex()->TryLock()) {
    // If this page was already swept in the meantime, we can return here.
    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
      page->mutex()->Unlock();
      return 0;
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    if (space->identity() == OLD_SPACE) {
      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                        IGNORE_FREE_SPACE>(space, page, NULL);
    } else if (space->identity() == CODE_SPACE) {
      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
                        IGNORE_FREE_SPACE>(space, page, NULL);
    } else {
      max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                        IGNORE_FREE_SPACE>(space, page, NULL);
    }
    {
      base::LockGuard<base::Mutex> guard(&swept_pages_mutex_);
      swept_pages(space->identity())->Add(page);
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
    page->mutex()->Unlock();
  }
  return max_freed;
}

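// Prepares a paged space for sweeping: evacuation candidates and black pages
// are skipped, NEVER_ALLOCATE_ON_PAGE pages are swept immediately to stay
// iterable, at most one completely empty page is kept, and the remaining
// pages are queued for (possibly concurrent) sweeping, sorted by live bytes.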
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  PageIterator it(space);

  int will_be_swept = 0;
  bool unused_page_present = false;

  while (it.has_next()) {
    Page* p = it.next();
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      DCHECK(evacuation_candidates_.length() > 0);
      continue;
    }

    // We can not sweep black pages, since all mark bits are set for these
    // pages.
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      Bitmap::Clear(p);
      p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
      p->ClearFlag(Page::BLACK_PAGE);
      // TODO(hpayer): Free unused memory of last black page.
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory into the free list that is later on
      // (in the free list) dropped again. Since we only use the flag for
      // testing this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
            IGNORE_FREE_SPACE>(space, p, nullptr);
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p", p);
        }
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
    sweeping_list(space).push_back(p);
    int to_sweep = p->area_size() - p->LiveBytes();
    space->accounting_stats_.ShrinkSpace(to_sweep);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
  std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
            [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
}

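// Starts sweeping of the old, code and map spaces and, if enabled, hands the
// queued pages to concurrent sweeper tasks; unmarked large objects are freed
// directly.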
void MarkCompactCollector::SweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = heap_->MonotonicallyIncreasingTimeInMs();
  }

#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    sweeping_in_progress_ = true;
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    if (FLAG_concurrent_sweeping) {
      StartSweeperThreads();
    }
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                     start_time);
  }
}


void MarkCompactCollector::ParallelSweepSpacesComplete() {
  sweeping_list(heap()->old_space()).clear();
  sweeping_list(heap()->code_space()).clear();
  sweeping_list(heap()->map_space()).clear();
}

Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}

void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                               Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(host)) {
    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(host, &rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8