// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/page-parallel-job.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";


// The following has to hold in order for {Marking::MarkBitFrom} to not produce
// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
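// Illustrative note (an assumption about the bitmap layout, not stated in this
// file): an object's color is read from the two consecutive mark bits that
// correspond to its first two words. If objects could be a single word long,
// the second color bit of a white object ("00") would be the first color bit
// of its neighbor; marking that neighbor black ("11") would then make the
// white object read back as the impossible pattern "01". Requiring at least
// two words per object keeps the bit pairs from overlapping.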


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    :  // NOLINT
      heap_(heap),
      page_parallel_job_semaphore_(0),
#ifdef DEBUG
      state_(IDLE),
#endif
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      evacuation_(false),
      compacting_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(0),
      code_flusher_(nullptr),
      embedder_heap_tracer_(nullptr),
      sweeper_(heap) {
}

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) override {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
      // The next word for sure belongs to the current object, jump over it.
      current += kPointerSize;
    }
  }
}

static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
  CHECK(page->IsFlagSet(Page::BLACK_PAGE));
  VerifyMarkingVisitor visitor(heap);
  HeapObjectIterator it(page);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
    object->Iterate(&visitor);
  }
}

static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    Page* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      VerifyMarkingBlackPage(space->heap(), p);
    } else {
      VerifyMarking(space->heap(), p->area_start(), p->area_end());
    }
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    Page* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
    return;
  }
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
      PrintF("[code-flushing is now on]\n");
    }
  }
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
  delete marking_deque_memory_;
  delete code_flusher_;
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}

void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
  }
// There is no need to filter the old-to-old set because
// it is completely cleared after the mark-compact GC.
// The slots that become invalid due to runtime transitions are
// cleared eagerly immediately after the transition.

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
    RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
  }
#endif
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  EvacuateNewSpaceAndCandidates();

  Finish();
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    Bitmap::Clear(p);
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      p->ClearFlag(Page::BLACK_PAGE);
    }
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Marking::MarkWhite(Marking::MarkBitFrom(obj));
    MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
    chunk->ResetProgressBar();
    chunk->ResetLiveBytes();
    if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
      chunk->ClearFlag(Page::BLACK_PAGE);
    }
  }
}

class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
 public:
  SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
              AllocationSpace space_to_start)
      : sweeper_(sweeper),
        pending_sweeper_tasks_(pending_sweeper_tasks),
        space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
    }
    pending_sweeper_tasks_->Signal();
  }

  Sweeper* sweeper_;
  base::Semaphore* pending_sweeper_tasks_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

void MarkCompactCollector::Sweeper::StartSweeping() {
  sweeping_in_progress_ = true;
  ForAllSweepingSpaces([this](AllocationSpace space) {
    std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
              [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
  });
  if (FLAG_concurrent_sweeping) {
    ForAllSweepingSpaces([this](AllocationSpace space) {
      if (space == NEW_SPACE) return;
      StartSweepingHelper(space);
    });
  }
}

void MarkCompactCollector::Sweeper::StartSweepingHelper(
    AllocationSpace space_to_start) {
  num_sweeping_tasks_++;
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
      v8::Platform::kShortRunningTask);
}

void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
    Page* page) {
  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
  if (!page->SweepingDone()) {
    ParallelSweepPage(page, owner);
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}

void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
    sweeper().ParallelSweepSpace(space->identity(), 0);
    space->RefillFreeList();
  }
}

Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  SweptList& list = swept_list_[space->identity()];
  if (list.length() > 0) {
    return list.RemoveLast();
  }
  return nullptr;
}

void MarkCompactCollector::Sweeper::EnsureCompleted() {
  if (!sweeping_in_progress_) return;

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    ForAllSweepingSpaces(
        [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
  }

  if (FLAG_concurrent_sweeping) {
    while (num_sweeping_tasks_ > 0) {
      pending_sweeper_tasks_semaphore_.Wait();
      num_sweeping_tasks_--;
    }
  }

  ForAllSweepingSpaces(
      [this](AllocationSpace space) { DCHECK(sweeping_list_[space].empty()); });
  late_pages_ = false;
  sweeping_in_progress_ = false;
}

void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper().sweeping_in_progress()) return;

  sweeper().EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    VerifyEvacuation(heap_);
  }
#endif
}

bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
  if (!pending_sweeper_tasks_semaphore_.WaitFor(
          base::TimeDelta::FromSeconds(0))) {
    return false;
  }
  pending_sweeper_tasks_semaphore_.Signal();
  return true;
}

void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking() ||
      Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
    return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


void MarkCompactCollector::ComputeEvacuationHeuristics(
    int area_size, int* target_fragmentation_percent,
    int* max_evacuated_bytes) {
  // For memory reducing mode we directly define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const int kTargetMsPerArea = 1;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  int area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<int, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    if (p->NeverEvacuate()) continue;
    if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->old_to_old_slots());
    CHECK_NULL(p->typed_old_to_old_slots());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
  }

  int candidate_count = 0;
  int total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an
    // evacuation candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., the ratio of
    //   live bytes to the capacity of the page (= area).
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to
    // the set of evacuation candidates as long as both conditions
    // (fragmentation and quota) hold.
    int max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const intptr_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      int live_bytes = pages[i].first;
      int free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%d "
                     "fragmentation_limit_kb=%" V8PRIdPTR
                     " fragmentation_limit_percent=%d sum_compaction_kb=%d "
                     "compaction_limit_kb=%d\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size).
    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%d\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();
  }

  // If concurrent unmapping tasks are still running, we should wait for
  // them here.
  heap()->memory_allocator()->unmapper()->WaitUntilCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    ClearMarkbits();
    AbortWeakCollections();
    AbortWeakCells();
    AbortTransitionArrays();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
    // If we added some more pages during MC, we need to start at least one
    // more task as all other tasks might already be finished.
    sweeper().StartSweepingHelper(OLD_SPACE);
  }

  // The hashing of weak_object_to_code_table is no longer valid.
  heap()->weak_object_to_code_table()->Rehash(
      heap()->isolate()->factory()->undefined_value());

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
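
// Illustrative sketch only (not part of the collector): the overflow-tolerant
// worklist discipline described above, reduced to a toy object graph. The
// ToyMarkNode type and the ToyMarkReachable() helper are hypothetical
// stand-ins for V8's heap objects, marking deque, and bitmap rescan; they only
// demonstrate the control flow of "push until full, mark the rest as
// overflowed, then rescan and refill".
struct ToyMarkNode {
  bool marked = false;
  bool overflowed = false;
  std::vector<ToyMarkNode*> children;
};

inline void ToyMarkReachable(ToyMarkNode* root,
                             const std::vector<ToyMarkNode*>& all_nodes,
                             size_t stack_capacity) {
  std::vector<ToyMarkNode*> stack;
  bool overflowed = false;
  auto mark_and_push = [&](ToyMarkNode* node) {
    if (node->marked) return;
    node->marked = true;
    if (!overflowed && stack.size() < stack_capacity) {
      stack.push_back(node);
    } else {
      // Stack is full (or already overflowed): remember the node through its
      // overflow bit instead of pushing it.
      node->overflowed = true;
      overflowed = true;
    }
  };
  mark_and_push(root);
  for (;;) {
    // Drain the explicit stack; children of popped nodes are pushed or, once
    // the stack has overflowed, marked as overflowed.
    while (!stack.empty()) {
      ToyMarkNode* node = stack.back();
      stack.pop_back();
      for (ToyMarkNode* child : node->children) mark_and_push(child);
    }
    if (!overflowed) break;
    // Clear the overflow flag and refill the stack by scanning for overflowed
    // nodes; the real collector rescans the heap via the marking bitmap.
    overflowed = false;
    for (ToyMarkNode* node : all_nodes) {
      if (!node->overflowed) continue;
      if (stack.size() < stack_capacity) {
        node->overflowed = false;
        stack.push_back(node);
      } else {
        overflowed = true;  // More overflowed nodes remain; rescan again later.
      }
    }
  }
}
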
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!shared->OptimizedCodeMapIsCleared()) {
        shared->ClearOptimizedCodeMap();
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      DCHECK(Marking::IsBlack(code_mark));
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
        candidate, slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(
        shared, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!candidate->OptimizedCodeMapIsCleared()) {
        candidate->ClearOptimizedCodeMap();
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
                                                           *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->IterateBlackObject(function);
  isolate_->heap()->incremental_marking()->IterateBlackObject(
      function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), object, p);
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, object, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, object, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (Marking::IsWhite(mark_bit)) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         HeapObject* object, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* target_object = HeapObject::cast(*p);
    collector->RecordSlot(object, p, target_object);
    MarkBit mark = Marking::MarkBitFrom(target_object);
    collector->MarkObject(target_object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
                                          Object** start, Object** end)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001201 // Return false is we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(object, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (Marking::IsBlackOrGrey(mark)) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
        Object** slot =
            data->data_start() + JSRegExp::saved_code_index(is_one_byte);
        heap->mark_compact_collector()->RecordSlot(data, slot, code);
      }

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_one_byte),
                    Smi::FromInt(heap->ms_count() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
      }
    }
  }
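
  // Worked example of the aging scheme above (illustrative numbers, not from
  // the source): suppose heap->ms_count() is 42 when the compiled RegExp code
  // is visited. The code slot is then replaced by Smi 42 (42 & 0xff) and the
  // real code is stashed in the saved-code slot. If the RegExp is not used
  // again, then during the GC where ms_count() == 47 the check
  // value == (ms_count() - kRegExpCodeThreshold) & 0xff, i.e. 42 == 42,
  // succeeds and both slots are reset to kUninitializedValue, flushing the
  // code. Using the RegExp in between reinstates the code and restarts aging.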
1272
1273
1274 // Works by setting the current sweep_generation (as a smi) in the
1275 // code object place in the data array of the RegExp and keeps a copy
1276 // around that can be reinstated if we reuse the RegExp before flushing.
1277 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1278 // we flush the code.
1279 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1280 Heap* heap = map->GetHeap();
1281 MarkCompactCollector* collector = heap->mark_compact_collector();
1282 if (!collector->is_code_flushing_enabled()) {
1283 VisitJSRegExp(map, object);
1284 return;
1285 }
1286 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1287 // Flush code or set age on both one byte and two byte code.
1288 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1289 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1290 // Visit the fields of the RegExp, including the updated FixedArray.
1291 VisitJSRegExp(map, object);
1292 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001293};
1294
1295
1296void MarkCompactMarkingVisitor::Initialize() {
1297 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1298
1299 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1300
1301 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001302 ObjectStatsVisitor::Initialize(&table_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001303 }
1304}
1305
1306
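// Thread visitor used while preparing for code flushing: it visits each
// (archived) thread's state and marks the code objects reachable from its
// stack frames via PrepareThreadForCodeFlushing, so code still referenced
// from a stack is not flushed.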
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001307class CodeMarkingVisitor : public ThreadVisitor {
1308 public:
1309 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1310 : collector_(collector) {}
1311
1312 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1313 collector_->PrepareThreadForCodeFlushing(isolate, top);
1314 }
1315
1316 private:
1317 MarkCompactCollector* collector_;
1318};
1319
1320
1321class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1322 public:
1323 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1324 : collector_(collector) {}
1325
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001326 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001327 for (Object** p = start; p < end; p++) VisitPointer(p);
1328 }
1329
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001330 void VisitPointer(Object** slot) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001331 Object* obj = *slot;
1332 if (obj->IsSharedFunctionInfo()) {
1333 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1334 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1335 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1336 collector_->MarkObject(shared->code(), code_mark);
1337 collector_->MarkObject(shared, shared_mark);
1338 }
1339 }
1340
1341 private:
1342 MarkCompactCollector* collector_;
1343};
1344
1345
1346void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1347 ThreadLocalTop* top) {
1348 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1349 // Note: for a frame that has a pending lazy deoptimization,
1350 // StackFrame::unchecked_code will return the non-optimized code object
1351 // for the outermost function, and StackFrame::LookupCode will return
1352 // the actual optimized code object.
1353 StackFrame* frame = it.frame();
1354 Code* code = frame->unchecked_code();
1355 MarkBit code_mark = Marking::MarkBitFrom(code);
1356 MarkObject(code, code_mark);
1357 if (frame->is_optimized()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001358 Code* optimized_code = frame->LookupCode();
1359 MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1360 MarkObject(optimized_code, optimized_code_mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001361 }
1362 }
1363}
1364
1365
1366void MarkCompactCollector::PrepareForCodeFlushing() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001367 // If code flushing is disabled, there is no need to prepare for it.
1368 if (!is_code_flushing_enabled()) return;
1369
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001370 // Make sure we are not referencing the code from the stack.
1371 DCHECK(this == heap()->mark_compact_collector());
1372 PrepareThreadForCodeFlushing(heap()->isolate(),
1373 heap()->isolate()->thread_local_top());
1374
1375 // Iterate the archived stacks in all threads to check if
1376 // the code is referenced.
1377 CodeMarkingVisitor code_marking_visitor(this);
1378 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1379 &code_marking_visitor);
1380
1381 SharedFunctionInfoMarkingVisitor visitor(this);
1382 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1383 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1384
1385 ProcessMarkingDeque();
1386}
1387
1388
1389// Visitor class for marking heap roots.
1390class RootMarkingVisitor : public ObjectVisitor {
1391 public:
1392 explicit RootMarkingVisitor(Heap* heap)
1393 : collector_(heap->mark_compact_collector()) {}
1394
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001395 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001396
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001397 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001398 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1399 }
1400
1401 // Skip the weak next code link in a code object, which is visited in
1402 // ProcessTopOptimizedFrame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001403 void VisitNextCodeLink(Object** p) override {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001404
1405 private:
1406 void MarkObjectByPointer(Object** p) {
1407 if (!(*p)->IsHeapObject()) return;
1408
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001409 HeapObject* object = HeapObject::cast(*p);
Ben Murdochc5610432016-08-08 18:44:38 +01001410
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001411 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001412 if (Marking::IsBlackOrGrey(mark_bit)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001413
1414 Map* map = object->map();
1415 // Mark the object.
1416 collector_->SetMark(object, mark_bit);
1417
1418 // Mark the map pointer and body, and push them on the marking stack.
1419 MarkBit map_mark = Marking::MarkBitFrom(map);
1420 collector_->MarkObject(map, map_mark);
1421 MarkCompactMarkingVisitor::IterateBody(map, object);
1422
1423 // Mark all the objects reachable from the map and body. May leave
1424 // overflowed objects in the heap.
1425 collector_->EmptyMarkingDeque();
1426 }
1427
1428 MarkCompactCollector* collector_;
1429};
1430
1431
1432// Helper class for pruning the string table.
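// When |finalize_external_strings| is true, unmarked entries are expected to
// be external strings and are finalized; otherwise they are counted and
// replaced with the hole value. When |record_slots| is true, the slots of
// surviving entries are additionally recorded (RecordSlot) against |table|.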
Ben Murdochda12d292016-06-02 14:46:10 +01001433template <bool finalize_external_strings, bool record_slots>
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001434class StringTableCleaner : public ObjectVisitor {
1435 public:
Ben Murdochda12d292016-06-02 14:46:10 +01001436 StringTableCleaner(Heap* heap, HeapObject* table)
1437 : heap_(heap), pointers_removed_(0), table_(table) {
1438 DCHECK(!record_slots || table != nullptr);
1439 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001440
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001441 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001442 // Visit all HeapObject pointers in [start, end).
Ben Murdochda12d292016-06-02 14:46:10 +01001443 MarkCompactCollector* collector = heap_->mark_compact_collector();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001444 for (Object** p = start; p < end; p++) {
1445 Object* o = *p;
Ben Murdochda12d292016-06-02 14:46:10 +01001446 if (o->IsHeapObject()) {
1447 if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1448 if (finalize_external_strings) {
1449 DCHECK(o->IsExternalString());
1450 heap_->FinalizeExternalString(String::cast(*p));
1451 } else {
1452 pointers_removed_++;
1453 }
1454 // Set the entry to the_hole_value (as deleted).
1455 *p = heap_->the_hole_value();
1456 } else if (record_slots) {
1457 // StringTable contains only old space strings.
1458 DCHECK(!heap_->InNewSpace(o));
1459 collector->RecordSlot(table_, p, o);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001460 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001461 }
1462 }
1463 }
1464
1465 int PointersRemoved() {
1466 DCHECK(!finalize_external_strings);
1467 return pointers_removed_;
1468 }
1469
1470 private:
1471 Heap* heap_;
1472 int pointers_removed_;
Ben Murdochda12d292016-06-02 14:46:10 +01001473 HeapObject* table_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001474};
1475
Ben Murdochda12d292016-06-02 14:46:10 +01001476typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1477typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001478
1479// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1480// are retained.
1481class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1482 public:
1483 virtual Object* RetainAs(Object* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001484 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1485 DCHECK(!Marking::IsGrey(mark_bit));
1486 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001487 return object;
1488 } else if (object->IsAllocationSite() &&
1489 !(AllocationSite::cast(object)->IsZombie())) {
1490 // "dead" AllocationSites need to live long enough for a traversal of new
1491 // space. These sites get a one-time reprieve.
1492 AllocationSite* site = AllocationSite::cast(object);
1493 site->MarkZombie();
1494 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1495 return object;
1496 } else {
1497 return NULL;
1498 }
1499 }
1500};
1501
1502
1503// Fill the marking stack with overflowed objects returned by the given
1504// iterator. Stop when the marking stack is filled or the end of the space
1505// is reached, whichever comes first.
1506template <class T>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001507void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001508 // The caller should ensure that the marking stack is initially not full,
1509 // so that we don't waste effort pointlessly scanning for objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001510 DCHECK(!marking_deque()->IsFull());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001511
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001512 Map* filler_map = heap()->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001513 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1514 MarkBit markbit = Marking::MarkBitFrom(object);
1515 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1516 Marking::GreyToBlack(markbit);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001517 PushBlack(object);
1518 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001519 }
1520 }
1521}
1522
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001523void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1524 DCHECK(!marking_deque()->IsFull());
1525 LiveObjectIterator<kGreyObjects> it(p);
1526 HeapObject* object = NULL;
1527 while ((object = it.Next()) != NULL) {
1528 MarkBit markbit = Marking::MarkBitFrom(object);
1529 DCHECK(Marking::IsGrey(markbit));
1530 Marking::GreyToBlack(markbit);
1531 PushBlack(object);
1532 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001533 }
1534}
1535
Ben Murdochda12d292016-06-02 14:46:10 +01001536class RecordMigratedSlotVisitor final : public ObjectVisitor {
1537 public:
1538 inline void VisitPointer(Object** p) final {
1539 RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1540 }
1541
1542 inline void VisitPointers(Object** start, Object** end) final {
1543 while (start < end) {
1544 RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1545 ++start;
1546 }
1547 }
1548
1549 inline void VisitCodeEntry(Address code_entry_slot) final {
1550 Address code_entry = Memory::Address_at(code_entry_slot);
1551 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1552 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1553 CODE_ENTRY_SLOT, code_entry_slot);
1554 }
1555 }
1556
1557 private:
1558 inline void RecordMigratedSlot(Object* value, Address slot) {
1559 if (value->IsHeapObject()) {
1560 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1561 if (p->InNewSpace()) {
1562 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1563 } else if (p->IsEvacuationCandidate()) {
1564 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1565 }
1566 }
1567 }
1568};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001569
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001570class MarkCompactCollector::HeapObjectVisitor {
1571 public:
1572 virtual ~HeapObjectVisitor() {}
1573 virtual bool Visit(HeapObject* object) = 0;
1574};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001575
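// Common base for the evacuation visitors. TryEvacuateObject() allocates
// room in the target compaction space and, on success, MigrateObject()
// copies the payload, re-records outgoing slots (old space), registers a
// typed RELOCATED_CODE_OBJECT entry and relocates the copy (code space),
// emits profiler/logger events when profiling_ is set, and finally stores
// the forwarding address in the first word of the original object.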
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001576class MarkCompactCollector::EvacuateVisitorBase
1577 : public MarkCompactCollector::HeapObjectVisitor {
Ben Murdochda12d292016-06-02 14:46:10 +01001578 protected:
1579 enum MigrationMode { kFast, kProfiled };
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580
Ben Murdochda12d292016-06-02 14:46:10 +01001581 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1582 : heap_(heap),
1583 compaction_spaces_(compaction_spaces),
1584 profiling_(
1585 heap->isolate()->cpu_profiler()->is_profiling() ||
1586 heap->isolate()->logger()->is_logging_code_events() ||
1587 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1588
1589 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1590 HeapObject** target_object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001591 int size = object->Size();
1592 AllocationAlignment alignment = object->RequiredAlignment();
1593 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1594 if (allocation.To(target_object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01001595 MigrateObject(*target_object, object, size, target_space->identity());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001596 return true;
1597 }
1598 return false;
1599 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001600
Ben Murdochda12d292016-06-02 14:46:10 +01001601 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1602 AllocationSpace dest) {
1603 if (profiling_) {
1604 MigrateObject<kProfiled>(dst, src, size, dest);
1605 } else {
1606 MigrateObject<kFast>(dst, src, size, dest);
1607 }
1608 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001609
Ben Murdochda12d292016-06-02 14:46:10 +01001610 template <MigrationMode mode>
1611 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1612 AllocationSpace dest) {
1613 Address dst_addr = dst->address();
1614 Address src_addr = src->address();
1615 DCHECK(heap_->AllowedToBeMigrated(src, dest));
1616 DCHECK(dest != LO_SPACE);
1617 if (dest == OLD_SPACE) {
1618 DCHECK_OBJECT_SIZE(size);
1619 DCHECK(IsAligned(size, kPointerSize));
1620 heap_->CopyBlock(dst_addr, src_addr, size);
1621 if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
1622 PROFILE(heap_->isolate(),
1623 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1624 }
1625 RecordMigratedSlotVisitor visitor;
1626 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1627 } else if (dest == CODE_SPACE) {
1628 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1629 if (mode == kProfiled) {
1630 PROFILE(heap_->isolate(),
1631 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1632 }
1633 heap_->CopyBlock(dst_addr, src_addr, size);
1634 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(dst_addr),
1635 RELOCATED_CODE_OBJECT, dst_addr);
1636 Code::cast(dst)->Relocate(dst_addr - src_addr);
1637 } else {
1638 DCHECK_OBJECT_SIZE(size);
1639 DCHECK(dest == NEW_SPACE);
1640 heap_->CopyBlock(dst_addr, src_addr, size);
1641 }
1642 if (mode == kProfiled) {
1643 heap_->OnMoveEvent(dst, src, size);
1644 }
1645 Memory::Address_at(src_addr) = dst_addr;
1646 }
1647
1648 Heap* heap_;
1649 CompactionSpaceCollection* compaction_spaces_;
1650 bool profiling_;
1651};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001652
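// Evacuates live new-space objects. Objects that satisfy
// Heap::ShouldBePromoted are moved into old space; all others are copied
// within new space. Small objects (<= kMaxLabObjectSize) are allocated from
// a 4 KB local allocation buffer (LAB) so most copies avoid a synchronized
// new-space allocation; if new space cannot provide memory, allocation
// falls back to old space.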
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001653class MarkCompactCollector::EvacuateNewSpaceVisitor final
1654 : public MarkCompactCollector::EvacuateVisitorBase {
1655 public:
1656 static const intptr_t kLabSize = 4 * KB;
1657 static const intptr_t kMaxLabObjectSize = 256;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001658
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001659 explicit EvacuateNewSpaceVisitor(Heap* heap,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001660 CompactionSpaceCollection* compaction_spaces,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001661 HashMap* local_pretenuring_feedback)
Ben Murdochda12d292016-06-02 14:46:10 +01001662 : EvacuateVisitorBase(heap, compaction_spaces),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001663 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1664 space_to_allocate_(NEW_SPACE),
1665 promoted_size_(0),
1666 semispace_copied_size_(0),
1667 local_pretenuring_feedback_(local_pretenuring_feedback) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001668
Ben Murdochc5610432016-08-08 18:44:38 +01001669 inline bool Visit(HeapObject* object) override {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001670 heap_->UpdateAllocationSite<Heap::kCached>(object,
1671 local_pretenuring_feedback_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001672 int size = object->Size();
1673 HeapObject* target_object = nullptr;
1674 if (heap_->ShouldBePromoted(object->address(), size) &&
Ben Murdoch097c5b22016-05-18 11:27:45 +01001675 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1676 &target_object)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001677 // If we end up needing more special cases, we should factor this out.
1678 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1679 heap_->array_buffer_tracker()->Promote(
1680 JSArrayBuffer::cast(target_object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001681 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001682 promoted_size_ += size;
1683 return true;
1684 }
1685 HeapObject* target = nullptr;
1686 AllocationSpace space = AllocateTargetObject(object, &target);
Ben Murdochda12d292016-06-02 14:46:10 +01001687 MigrateObject(HeapObject::cast(target), object, size, space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001688 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1689 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1690 }
1691 semispace_copied_size_ += size;
1692 return true;
1693 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001694
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001695 intptr_t promoted_size() { return promoted_size_; }
1696 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1697
1698 private:
1699 enum NewSpaceAllocationMode {
1700 kNonstickyBailoutOldSpace,
1701 kStickyBailoutOldSpace,
1702 };
1703
1704 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1705 HeapObject** target_object) {
1706 const int size = old_object->Size();
1707 AllocationAlignment alignment = old_object->RequiredAlignment();
1708 AllocationResult allocation;
1709 if (space_to_allocate_ == NEW_SPACE) {
1710 if (size > kMaxLabObjectSize) {
1711 allocation =
1712 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1713 } else {
1714 allocation = AllocateInLab(size, alignment);
1715 }
1716 }
1717 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1718 allocation = AllocateInOldSpace(size, alignment);
1719 }
1720 bool ok = allocation.To(target_object);
1721 DCHECK(ok);
1722 USE(ok);
1723 return space_to_allocate_;
1724 }
1725
1726 inline bool NewLocalAllocationBuffer() {
1727 AllocationResult result =
1728 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1729 LocalAllocationBuffer saved_old_buffer = buffer_;
1730 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1731 if (buffer_.IsValid()) {
1732 buffer_.TryMerge(&saved_old_buffer);
1733 return true;
1734 }
1735 return false;
1736 }
1737
1738 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1739 AllocationAlignment alignment,
1740 NewSpaceAllocationMode mode) {
1741 AllocationResult allocation =
1742 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1743 if (allocation.IsRetry()) {
1744 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1745 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1746 } else {
1747 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1748 alignment);
1749 if (allocation.IsRetry()) {
1750 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001751 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001752 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001753 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001755 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001756
1757 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1758 AllocationAlignment alignment) {
1759 AllocationResult allocation =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001760 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1761 alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001762 if (allocation.IsRetry()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001763 v8::internal::Heap::FatalProcessOutOfMemory(
1764 "MarkCompactCollector: semi-space copy, fallback in old gen", true);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001765 }
1766 return allocation;
1767 }
1768
1769 inline AllocationResult AllocateInLab(int size_in_bytes,
1770 AllocationAlignment alignment) {
1771 AllocationResult allocation;
1772 if (!buffer_.IsValid()) {
1773 if (!NewLocalAllocationBuffer()) {
1774 space_to_allocate_ = OLD_SPACE;
1775 return AllocationResult::Retry(OLD_SPACE);
1776 }
1777 }
1778 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1779 if (allocation.IsRetry()) {
1780 if (!NewLocalAllocationBuffer()) {
1781 space_to_allocate_ = OLD_SPACE;
1782 return AllocationResult::Retry(OLD_SPACE);
1783 } else {
1784 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1785 if (allocation.IsRetry()) {
1786 space_to_allocate_ = OLD_SPACE;
1787 return AllocationResult::Retry(OLD_SPACE);
1788 }
1789 }
1790 }
1791 return allocation;
1792 }
1793
1794 LocalAllocationBuffer buffer_;
1795 AllocationSpace space_to_allocate_;
1796 intptr_t promoted_size_;
1797 intptr_t semispace_copied_size_;
1798 HashMap* local_pretenuring_feedback_;
1799};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001800
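// Handles whole-page promotion of new-space pages: TryMoveToOldSpace() swaps
// the page out of new space, converts it into an old-space page and tags it
// with PAGE_NEW_OLD_PROMOTION. Visit() then only records the outgoing slots
// of each live object (and promotes any JSArrayBuffers) instead of copying
// the objects.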
Ben Murdochc5610432016-08-08 18:44:38 +01001801class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1802 : public MarkCompactCollector::HeapObjectVisitor {
1803 public:
1804 EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
1805
1806 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) {
1807 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) {
1808 Page* new_page = Page::ConvertNewToOld(page, owner);
1809 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1810 }
1811 }
1812
1813 inline bool Visit(HeapObject* object) {
1814 if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
1815 object->GetHeap()->array_buffer_tracker()->Promote(
1816 JSArrayBuffer::cast(object));
1817 }
1818 RecordMigratedSlotVisitor visitor;
1819 object->IterateBodyFast(&visitor);
1820 promoted_size_ += object->Size();
1821 return true;
1822 }
1823
1824 intptr_t promoted_size() { return promoted_size_; }
1825
1826 private:
1827 intptr_t promoted_size_;
1828};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001829
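// Evacuates objects from evacuation candidates into the compaction space
// that matches the owning space of the candidate page. Returns false when
// no target memory could be allocated, leaving the object in place.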
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001830class MarkCompactCollector::EvacuateOldSpaceVisitor final
1831 : public MarkCompactCollector::EvacuateVisitorBase {
1832 public:
1833 EvacuateOldSpaceVisitor(Heap* heap,
Ben Murdochda12d292016-06-02 14:46:10 +01001834 CompactionSpaceCollection* compaction_spaces)
1835 : EvacuateVisitorBase(heap, compaction_spaces) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001836
Ben Murdochc5610432016-08-08 18:44:38 +01001837 inline bool Visit(HeapObject* object) override {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001838 CompactionSpace* target_space = compaction_spaces_->Get(
1839 Page::FromAddress(object->address())->owner()->identity());
1840 HeapObject* target_object = nullptr;
1841 if (TryEvacuateObject(target_space, object, &target_object)) {
1842 DCHECK(object->map_word().IsForwardingAddress());
1843 return true;
1844 }
1845 return false;
1846 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001847};
1848
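// Does not move objects at all: it only re-records their outgoing slots
// (old space) or a typed RELOCATED_CODE_OBJECT entry covering the whole
// object (code space). Presumably used for candidate pages whose evacuation
// was aborted.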
Ben Murdochc5610432016-08-08 18:44:38 +01001849class MarkCompactCollector::EvacuateRecordOnlyVisitor final
1850 : public MarkCompactCollector::HeapObjectVisitor {
1851 public:
1852 explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {}
1853
1854 inline bool Visit(HeapObject* object) {
1855 if (space_ == OLD_SPACE) {
1856 RecordMigratedSlotVisitor visitor;
1857 object->IterateBody(&visitor);
1858 } else {
1859 DCHECK_EQ(space_, CODE_SPACE);
1860 // Add a typed slot for the whole code object.
1861 RememberedSet<OLD_TO_OLD>::InsertTyped(
1862 Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT,
1863 object->address());
1864 }
1865 return true;
1866 }
1867
1868 private:
1869 AllocationSpace space_;
1870};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001871
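// Pages flagged BLACK_PAGE are skipped below, presumably because every
// object on such a page is already black and there are no grey objects to
// discover.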
1872void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001873 PageIterator it(space);
1874 while (it.has_next()) {
1875 Page* p = it.next();
Ben Murdochda12d292016-06-02 14:46:10 +01001876 if (!p->IsFlagSet(Page::BLACK_PAGE)) {
1877 DiscoverGreyObjectsOnPage(p);
1878 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001879 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001880 }
1881}
1882
1883
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001884void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1885 NewSpace* space = heap()->new_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001886 NewSpacePageIterator it(space->bottom(), space->top());
1887 while (it.has_next()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001888 Page* page = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001889 DiscoverGreyObjectsOnPage(page);
1890 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001891 }
1892}
1893
1894
1895bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1896 Object* o = *p;
1897 if (!o->IsHeapObject()) return false;
1898 HeapObject* heap_object = HeapObject::cast(o);
1899 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001900 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001901}
1902
1903
1904bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1905 Object** p) {
1906 Object* o = *p;
1907 DCHECK(o->IsHeapObject());
1908 HeapObject* heap_object = HeapObject::cast(o);
1909 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001910 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001911}
1912
1913
1914void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1915 StringTable* string_table = heap()->string_table();
1916 // Mark the string table itself.
1917 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001918 if (Marking::IsWhite(string_table_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001919 // String table could have already been marked by visiting the handles list.
1920 SetMark(string_table, string_table_mark);
1921 }
1922 // Explicitly mark the prefix.
1923 string_table->IteratePrefix(visitor);
1924 ProcessMarkingDeque();
1925}
1926
1927
1928void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1929 MarkBit mark_bit = Marking::MarkBitFrom(site);
1930 SetMark(site, mark_bit);
1931}
1932
1933
1934void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1935 // Mark the heap roots including global variables, stack variables,
1936 // etc., and all objects reachable from them.
1937 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1938
1939 // Handle the string table specially.
1940 MarkStringTable(visitor);
1941
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001942 // There may be overflowed objects in the heap. Visit them now.
1943 while (marking_deque_.overflowed()) {
1944 RefillMarkingDeque();
1945 EmptyMarkingDeque();
1946 }
1947}
1948
1949
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001950void MarkCompactCollector::MarkImplicitRefGroups(
1951 MarkObjectFunction mark_object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001952 List<ImplicitRefGroup*>* ref_groups =
1953 isolate()->global_handles()->implicit_ref_groups();
1954
1955 int last = 0;
1956 for (int i = 0; i < ref_groups->length(); i++) {
1957 ImplicitRefGroup* entry = ref_groups->at(i);
1958 DCHECK(entry != NULL);
1959
1960 if (!IsMarked(*entry->parent)) {
1961 (*ref_groups)[last++] = entry;
1962 continue;
1963 }
1964
1965 Object*** children = entry->children;
1966 // A parent object is marked, so mark all child heap objects.
1967 for (size_t j = 0; j < entry->length; ++j) {
1968 if ((*children[j])->IsHeapObject()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001969 mark_object(heap(), HeapObject::cast(*children[j]));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001970 }
1971 }
1972
1973 // Once the entire group has been marked, dispose it because it's
1974 // not needed anymore.
1975 delete entry;
1976 }
1977 ref_groups->Rewind(last);
1978}
1979
1980
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001981// Mark all objects reachable from the objects on the marking stack.
1982// Before: the marking stack contains zero or more heap object pointers.
1983// After: the marking stack is empty, and all objects reachable from the
1984// marking stack have been marked, or are overflowed in the heap.
1985void MarkCompactCollector::EmptyMarkingDeque() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001986 Map* filler_map = heap_->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001987 while (!marking_deque_.IsEmpty()) {
1988 HeapObject* object = marking_deque_.Pop();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001989 // Explicitly skip one-word fillers. Incremental markbit patterns are
1990 // correct only for objects that occupy at least two words.
1991 Map* map = object->map();
1992 if (map == filler_map) continue;
1993
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001994 DCHECK(object->IsHeapObject());
1995 DCHECK(heap()->Contains(object));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001996 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001997
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001998 MarkBit map_mark = Marking::MarkBitFrom(map);
1999 MarkObject(map, map_mark);
2000
2001 MarkCompactMarkingVisitor::IterateBody(map, object);
2002 }
2003}
2004
2005
2006// Sweep the heap for overflowed objects, clear their overflow bits, and
2007// push them on the marking stack. Stop early if the marking stack fills
2008// before sweeping completes. If sweeping completes, there are no remaining
2009 // overflowed objects in the heap, so the overflow flag on the marking stack
2010// is cleared.
2011void MarkCompactCollector::RefillMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002012 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002013 DCHECK(marking_deque_.overflowed());
2014
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002015 DiscoverGreyObjectsInNewSpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002016 if (marking_deque_.IsFull()) return;
2017
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002018 DiscoverGreyObjectsInSpace(heap()->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002019 if (marking_deque_.IsFull()) return;
2020
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002021 DiscoverGreyObjectsInSpace(heap()->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002022 if (marking_deque_.IsFull()) return;
2023
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002024 DiscoverGreyObjectsInSpace(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002025 if (marking_deque_.IsFull()) return;
2026
2027 LargeObjectIterator lo_it(heap()->lo_space());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002028 DiscoverGreyObjectsWithIterator(&lo_it);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002029 if (marking_deque_.IsFull()) return;
2030
2031 marking_deque_.ClearOverflowed();
2032}
2033
2034
2035// Mark all objects reachable (transitively) from objects on the marking
2036// stack. Before: the marking stack contains zero or more heap object
2037// pointers. After: the marking stack is empty and there are no overflowed
2038// objects in the heap.
2039void MarkCompactCollector::ProcessMarkingDeque() {
2040 EmptyMarkingDeque();
2041 while (marking_deque_.overflowed()) {
2042 RefillMarkingDeque();
2043 EmptyMarkingDeque();
2044 }
2045}
2046
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002047// Mark all objects reachable (transitively) from objects on the marking
2048// stack including references only considered in the atomic marking pause.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002049void MarkCompactCollector::ProcessEphemeralMarking(
2050 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002051 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
Ben Murdochc5610432016-08-08 18:44:38 +01002052 bool work_to_do = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002053 while (work_to_do) {
Ben Murdochc5610432016-08-08 18:44:38 +01002054 if (UsingEmbedderHeapTracer()) {
2055 embedder_heap_tracer()->TraceWrappersFrom(wrappers_to_trace_);
2056 wrappers_to_trace_.clear();
2057 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002058 if (!only_process_harmony_weak_collections) {
2059 isolate()->global_handles()->IterateObjectGroups(
2060 visitor, &IsUnmarkedHeapObjectWithHeap);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002061 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002062 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002063 ProcessWeakCollections();
2064 work_to_do = !marking_deque_.IsEmpty();
2065 ProcessMarkingDeque();
2066 }
2067}
2068
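// Marks through the body of the topmost optimized code object on the current
// stack, but only if that code cannot be deoptimized at the current pc, and
// then drains the marking deque. The walk stops at the first JavaScript
// frame.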
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002069void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2070 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2071 !it.done(); it.Advance()) {
2072 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2073 return;
2074 }
2075 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2076 Code* code = it.frame()->LookupCode();
2077 if (!code->CanDeoptAt(it.frame()->pc())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002078 Code::BodyDescriptor::IterateBody(code, visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002079 }
2080 ProcessMarkingDeque();
2081 return;
2082 }
2083 }
2084}
2085
2086
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002087void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
2088 DCHECK(!marking_deque_.in_use());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002089 if (marking_deque_memory_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002090 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
2091 marking_deque_memory_committed_ = 0;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002092 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002093 if (marking_deque_memory_ == NULL) {
2094 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
2095 }
2096}
2097
2098
2099void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
2100 // If the marking deque is too small, we try to allocate a bigger one.
2101 // If that fails, make do with a smaller one.
2102 CHECK(!marking_deque_.in_use());
2103 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
2104 base::VirtualMemory* memory = marking_deque_memory_;
2105 size_t currently_committed = marking_deque_memory_committed_;
2106
2107 if (currently_committed == size) return;
2108
2109 if (currently_committed > size) {
2110 bool success = marking_deque_memory_->Uncommit(
2111 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
2112 currently_committed - size);
2113 if (success) {
2114 marking_deque_memory_committed_ = size;
2115 return;
2116 }
2117 UNREACHABLE();
2118 }
2119
2120 bool success = memory->Commit(
2121 reinterpret_cast<Address>(memory->address()) + currently_committed,
2122 size - currently_committed,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002123 false); // Not executable.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002124 if (success) {
2125 marking_deque_memory_committed_ = size;
2126 return;
2127 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002128 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002129 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002130}
2131
2132
2133void MarkCompactCollector::InitializeMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002134 DCHECK(!marking_deque_.in_use());
2135 DCHECK(marking_deque_memory_committed_ > 0);
2136 Address addr = static_cast<Address>(marking_deque_memory_->address());
2137 size_t size = marking_deque_memory_committed_;
2138 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2139 marking_deque_.Initialize(addr, addr + size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002140}
2141
2142
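// The marking deque is a ring buffer of HeapObject* slots. Its capacity is
// rounded down to a power of two so wrap-around can be computed as
// "index & mask_". For example, the 64 * kPointerSize bytes used under
// FLAG_force_marking_deque_overflows give 64 slots and mask_ == 63.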
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002143void MarkingDeque::Initialize(Address low, Address high) {
2144 DCHECK(!in_use_);
2145 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2146 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2147 array_ = obj_low;
2148 mask_ = base::bits::RoundDownToPowerOfTwo32(
2149 static_cast<uint32_t>(obj_high - obj_low)) -
2150 1;
2151 top_ = bottom_ = 0;
2152 overflowed_ = false;
2153 in_use_ = true;
2154}
2155
2156
2157void MarkingDeque::Uninitialize(bool aborting) {
2158 if (!aborting) {
2159 DCHECK(IsEmpty());
2160 DCHECK(!overflowed_);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002161 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002162 DCHECK(in_use_);
2163 top_ = bottom_ = 0xdecbad;
2164 in_use_ = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002165}
2166
Ben Murdochc5610432016-08-08 18:44:38 +01002167void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
2168 DCHECK_NOT_NULL(tracer);
2169 CHECK_NULL(embedder_heap_tracer_);
2170 embedder_heap_tracer_ = tracer;
2171}
2172
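// Collects wrapper candidates for the embedder heap tracer: JSObjects
// constructed from an API function with at least two internal fields. The
// values of fields 0 and 1 are passed to the tracer as an opaque pair
// (conventionally type information plus the embedder-side instance, though
// this code treats them as raw void*).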
2173void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
2174 DCHECK(js_object->WasConstructedFromApiFunction());
2175 if (js_object->GetInternalFieldCount() >= 2 &&
2176 js_object->GetInternalField(0) &&
2177 js_object->GetInternalField(0) != heap_->undefined_value() &&
2178 js_object->GetInternalField(1) != heap_->undefined_value()) {
2179 DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
2180 wrappers_to_trace().push_back(std::pair<void*, void*>(
2181 reinterpret_cast<void*>(js_object->GetInternalField(0)),
2182 reinterpret_cast<void*>(js_object->GetInternalField(1))));
2183 }
2184}
2185
2186void MarkCompactCollector::RegisterExternallyReferencedObject(Object** object) {
2187 DCHECK(in_use());
2188 HeapObject* heap_object = HeapObject::cast(*object);
2189 DCHECK(heap_->Contains(heap_object));
2190 MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
2191 MarkObject(heap_object, mark_bit);
2192}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002193
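// The atomic marking pause. It finalizes (or aborts) incremental marking,
// prepares code flushing, marks the strong roots and the string table, and
// then computes the weak/ephemeral closure: embedder-traced wrappers, object
// groups, weak collections and weak global handles, re-draining the marking
// deque until no more work is discovered.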
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002194void MarkCompactCollector::MarkLiveObjects() {
Ben Murdochda12d292016-06-02 14:46:10 +01002195 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002196 double start_time = 0.0;
2197 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002198 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002199 }
2200 // The recursive GC marker detects when it is nearing stack overflow,
2201 // and switches to a different marking system. JS interrupts interfere
2202 // with the C stack limit check.
2203 PostponeInterruptsScope postpone(isolate());
2204
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002205 {
Ben Murdochda12d292016-06-02 14:46:10 +01002206 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002207 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2208 if (was_marked_incrementally_) {
2209 incremental_marking->Finalize();
2210 } else {
2211 // Abort any pending incremental activities e.g. incremental sweeping.
2212 incremental_marking->Stop();
2213 if (marking_deque_.in_use()) {
2214 marking_deque_.Uninitialize(true);
2215 }
2216 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002217 }
2218
2219#ifdef DEBUG
2220 DCHECK(state_ == PREPARE_GC);
2221 state_ = MARK_LIVE_OBJECTS;
2222#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002223
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002224 EnsureMarkingDequeIsCommittedAndInitialize(
2225 MarkCompactCollector::kMaxMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002226
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002227 {
Ben Murdochda12d292016-06-02 14:46:10 +01002228 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002229 PrepareForCodeFlushing();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002230 }
2231
2232 RootMarkingVisitor root_visitor(heap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002233
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002234 {
Ben Murdochda12d292016-06-02 14:46:10 +01002235 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002236 MarkRoots(&root_visitor);
2237 ProcessTopOptimizedFrame(&root_visitor);
2238 }
2239
2240 {
Ben Murdochda12d292016-06-02 14:46:10 +01002241 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002242
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002243 // The objects reachable from the roots are marked, yet unreachable
2244 // objects are unmarked. Mark objects reachable due to host
2245 // application specific logic or through Harmony weak maps.
Ben Murdochda12d292016-06-02 14:46:10 +01002246 {
2247 TRACE_GC(heap()->tracer(),
2248 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
Ben Murdochc5610432016-08-08 18:44:38 +01002249 if (UsingEmbedderHeapTracer()) {
2250 embedder_heap_tracer()->TracePrologue();
2251 ProcessMarkingDeque();
2252 }
Ben Murdochda12d292016-06-02 14:46:10 +01002253 ProcessEphemeralMarking(&root_visitor, false);
Ben Murdochda12d292016-06-02 14:46:10 +01002254 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002255
2256 // The objects reachable from the roots, weak maps or object groups
2257 // are marked. Objects pointed to only by weak global handles cannot be
2258 // immediately reclaimed. Instead, we have to mark them as pending and mark
2259 // objects reachable from them.
2260 //
2261 // First we identify nonlive weak handles and mark them as pending
2262 // destruction.
Ben Murdochda12d292016-06-02 14:46:10 +01002263 {
2264 TRACE_GC(heap()->tracer(),
2265 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2266 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2267 &IsUnmarkedHeapObject);
2268 ProcessMarkingDeque();
2269 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002270 // Then we mark the objects.
Ben Murdochda12d292016-06-02 14:46:10 +01002271
2272 {
2273 TRACE_GC(heap()->tracer(),
2274 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2275 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2276 ProcessMarkingDeque();
2277 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002278
2279 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2280 // the weak roots we just marked as pending destruction.
2281 //
2282 // We only process harmony collections, as all object groups have been fully
2283 // processed and no weakly reachable node can discover new object groups.
Ben Murdochda12d292016-06-02 14:46:10 +01002284 {
2285 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2286 ProcessEphemeralMarking(&root_visitor, true);
Ben Murdochc5610432016-08-08 18:44:38 +01002287 if (UsingEmbedderHeapTracer()) {
2288 embedder_heap_tracer()->TraceEpilogue();
2289 }
Ben Murdochda12d292016-06-02 14:46:10 +01002290 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002291 }
2292
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002293 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002294 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2295 start_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002296 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002297 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002298 if (FLAG_trace_gc_object_stats) {
2299 heap()->object_stats_->TraceObjectStats();
2300 }
2301 heap()->object_stats_->CheckpointObjectStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002302 }
2303}
2304
2305
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002306void MarkCompactCollector::ClearNonLiveReferences() {
Ben Murdochda12d292016-06-02 14:46:10 +01002307 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002308
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002309 {
Ben Murdochda12d292016-06-02 14:46:10 +01002310 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002311
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002312 // Prune the string table removing all strings only pointed to by the
2313 // string table. Cannot use string_table() here because the string
2314 // table is marked.
2315 StringTable* string_table = heap()->string_table();
Ben Murdochda12d292016-06-02 14:46:10 +01002316 InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002317 string_table->IterateElements(&internalized_visitor);
2318 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002319
Ben Murdochda12d292016-06-02 14:46:10 +01002320 ExternalStringTableCleaner external_visitor(heap(), nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002321 heap()->external_string_table_.Iterate(&external_visitor);
2322 heap()->external_string_table_.CleanUp();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002323 }
2324
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002325 {
Ben Murdochda12d292016-06-02 14:46:10 +01002326 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002327 // Process the weak references.
2328 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2329 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002330 }
2331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002332 {
Ben Murdochda12d292016-06-02 14:46:10 +01002333 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002334
2335 // Remove object groups after marking phase.
2336 heap()->isolate()->global_handles()->RemoveObjectGroups();
2337 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002338 }
2339
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002340 // Flush code from collected candidates.
2341 if (is_code_flushing_enabled()) {
Ben Murdochda12d292016-06-02 14:46:10 +01002342 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002343 code_flusher_->ProcessCandidates();
2344 }
2345
2346
2347 DependentCode* dependent_code_list;
2348 Object* non_live_map_list;
2349 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2350
2351 {
Ben Murdochda12d292016-06-02 14:46:10 +01002352 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002353 ClearSimpleMapTransitions(non_live_map_list);
2354 ClearFullMapTransitions();
2355 }
2356
2357 MarkDependentCodeForDeoptimization(dependent_code_list);
2358
2359 ClearWeakCollections();
2360
Ben Murdochda12d292016-06-02 14:46:10 +01002361 ClearInvalidRememberedSetSlots();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002362}
2363
2364
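// Marks code for deoptimization from two sources: the weak-code groups of
// the dependent-code list assembled by ClearWeakCells, and entries of the
// weak_object_to_code_table whose WeakCell key has been cleared (those
// entries are also removed from the table).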
2365void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2366 DependentCode* list_head) {
Ben Murdochda12d292016-06-02 14:46:10 +01002367 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002368 Isolate* isolate = this->isolate();
2369 DependentCode* current = list_head;
2370 while (current->length() > 0) {
2371 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2372 isolate, DependentCode::kWeakCodeGroup);
2373 current = current->next_link();
2374 }
2375
2376 WeakHashTable* table = heap_->weak_object_to_code_table();
2377 uint32_t capacity = table->Capacity();
2378 for (uint32_t i = 0; i < capacity; i++) {
2379 uint32_t key_index = table->EntryToIndex(i);
2380 Object* key = table->get(key_index);
2381 if (!table->IsKey(key)) continue;
2382 uint32_t value_index = table->EntryToValueIndex(i);
2383 Object* value = table->get(value_index);
2384 DCHECK(key->IsWeakCell());
2385 if (WeakCell::cast(key)->cleared()) {
2386 have_code_to_deoptimize_ |=
2387 DependentCode::cast(value)->MarkCodeForDeoptimization(
2388 isolate, DependentCode::kWeakCodeGroup);
2389 table->set(key_index, heap_->the_hole_value());
2390 table->set(value_index, heap_->the_hole_value());
2391 table->ElementRemoved();
2392 }
2393 }
2394}
2395
2396
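// |non_live_map_list| is a list of WeakCells, chained through their next
// fields, whose values are dead transition target maps. If the dead target's
// parent map is still live and still uses the WeakCell as its simple
// transition, the transition is cleared and the parent may take back
// ownership of the descriptor array; each cell is then cleared and unlinked.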
2397void MarkCompactCollector::ClearSimpleMapTransitions(
2398 Object* non_live_map_list) {
2399 Object* the_hole_value = heap()->the_hole_value();
2400 Object* weak_cell_obj = non_live_map_list;
2401 while (weak_cell_obj != Smi::FromInt(0)) {
2402 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2403 Map* map = Map::cast(weak_cell->value());
2404 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2405 Object* potential_parent = map->constructor_or_backpointer();
2406 if (potential_parent->IsMap()) {
2407 Map* parent = Map::cast(potential_parent);
2408 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2409 parent->raw_transitions() == weak_cell) {
2410 ClearSimpleMapTransition(parent, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002411 }
2412 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002413 weak_cell->clear();
2414 weak_cell_obj = weak_cell->next();
2415 weak_cell->clear_next(the_hole_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002416 }
2417}
2418
2419
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002420void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2421 Map* dead_transition) {
2422 // A previously existing simple transition (stored in a WeakCell) is going
2423 // to be cleared. Clear the useless cell pointer, and take ownership
2424 // of the descriptor array.
2425 map->set_raw_transitions(Smi::FromInt(0));
2426 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002427 DescriptorArray* descriptors = map->instance_descriptors();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002428 if (descriptors == dead_transition->instance_descriptors() &&
2429 number_of_own_descriptors > 0) {
2430 TrimDescriptorArray(map, descriptors);
2431 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2432 map->set_owns_descriptors(true);
2433 }
2434}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002435
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002436
2437void MarkCompactCollector::ClearFullMapTransitions() {
2438 HeapObject* undefined = heap()->undefined_value();
2439 Object* obj = heap()->encountered_transition_arrays();
2440 while (obj != Smi::FromInt(0)) {
2441 TransitionArray* array = TransitionArray::cast(obj);
2442 int num_transitions = array->number_of_entries();
2443 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2444 if (num_transitions > 0) {
2445 Map* map = array->GetTarget(0);
2446 Map* parent = Map::cast(map->constructor_or_backpointer());
2447 bool parent_is_alive =
2448 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2449 DescriptorArray* descriptors =
2450 parent_is_alive ? parent->instance_descriptors() : nullptr;
2451 bool descriptors_owner_died =
2452 CompactTransitionArray(parent, array, descriptors);
2453 if (descriptors_owner_died) {
2454 TrimDescriptorArray(parent, descriptors);
2455 }
2456 }
2457 obj = array->next_link();
2458 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2459 }
2460 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2461}
2462
2463
2464bool MarkCompactCollector::CompactTransitionArray(
2465 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2466 int num_transitions = transitions->number_of_entries();
2467 bool descriptors_owner_died = false;
2468 int transition_index = 0;
2469 // Compact all live transitions to the left.
2470 for (int i = 0; i < num_transitions; ++i) {
2471 Map* target = transitions->GetTarget(i);
2472 DCHECK_EQ(target->constructor_or_backpointer(), map);
2473 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2474 if (descriptors != nullptr &&
2475 target->instance_descriptors() == descriptors) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002476 descriptors_owner_died = true;
2477 }
2478 } else {
2479 if (i != transition_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002480 Name* key = transitions->GetKey(i);
2481 transitions->SetKey(transition_index, key);
2482 Object** key_slot = transitions->GetKeySlot(transition_index);
2483 RecordSlot(transitions, key_slot, key);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002484 // Target slots do not need to be recorded since maps are not compacted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002485 transitions->SetTarget(transition_index, transitions->GetTarget(i));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002486 }
2487 transition_index++;
2488 }
2489 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002490 // If there are no transitions to be cleared, return.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002491 if (transition_index == num_transitions) {
2492 DCHECK(!descriptors_owner_died);
2493 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002494 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002495 // Note that we never eliminate a transition array, though we might right-trim
2496 // such that number_of_transitions() == 0. If this assumption changes,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002497 // TransitionArray::Insert() will need to deal with the case that a transition
2498 // array disappeared during GC.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002499 int trim = TransitionArray::Capacity(transitions) - transition_index;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002500 if (trim > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002501 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2502 transitions, trim * TransitionArray::kTransitionSize);
2503 transitions->SetNumberOfTransitions(transition_index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002504 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002505 return descriptors_owner_died;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002506}
2507
2508
2509void MarkCompactCollector::TrimDescriptorArray(Map* map,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 DescriptorArray* descriptors) {
2511 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2512 if (number_of_own_descriptors == 0) {
2513 DCHECK(descriptors == heap_->empty_descriptor_array());
2514 return;
2515 }
2516
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002517 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2518 int to_trim = number_of_descriptors - number_of_own_descriptors;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002519 if (to_trim > 0) {
2520 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2521 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2522 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002523
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002524 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2525 descriptors->Sort();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002526
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002527 if (FLAG_unbox_double_fields) {
2528 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2529 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2530 number_of_own_descriptors);
2531 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2532 }
2533 }
2534 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2535 map->set_owns_descriptors(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002536}
2537
2538
2539void MarkCompactCollector::TrimEnumCache(Map* map,
2540 DescriptorArray* descriptors) {
2541 int live_enum = map->EnumLength();
2542 if (live_enum == kInvalidEnumCacheSentinel) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002543 live_enum =
2544 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002545 }
2546 if (live_enum == 0) return descriptors->ClearEnumCache();
2547
2548 FixedArray* enum_cache = descriptors->GetEnumCache();
2549
2550 int to_trim = enum_cache->length() - live_enum;
2551 if (to_trim <= 0) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002552 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2553 descriptors->GetEnumCache(), to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002554
2555 if (!descriptors->HasEnumIndicesCache()) return;
2556 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002557 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2558 to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002559}
2560
2561
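// Walks the list of encountered JSWeakCollections. For every entry whose key
// is still marked, the key slot is recorded and the value is marked so that
// it survives this cycle; entries with dead keys are left untouched here and
// removed later by ClearWeakCollections().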
2562void MarkCompactCollector::ProcessWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002563 Object* weak_collection_obj = heap()->encountered_weak_collections();
2564 while (weak_collection_obj != Smi::FromInt(0)) {
2565 JSWeakCollection* weak_collection =
2566 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2567 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2568 if (weak_collection->table()->IsHashTable()) {
2569 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002570 for (int i = 0; i < table->Capacity(); i++) {
2571 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2572 Object** key_slot =
2573 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002574 RecordSlot(table, key_slot, *key_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002575 Object** value_slot =
2576 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002577 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002578 value_slot);
2579 }
2580 }
2581 }
2582 weak_collection_obj = weak_collection->next();
2583 }
2584}
2585
2586
2587void MarkCompactCollector::ClearWeakCollections() {
Ben Murdochda12d292016-06-02 14:46:10 +01002588 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002589 Object* weak_collection_obj = heap()->encountered_weak_collections();
2590 while (weak_collection_obj != Smi::FromInt(0)) {
2591 JSWeakCollection* weak_collection =
2592 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2593 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2594 if (weak_collection->table()->IsHashTable()) {
2595 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2596 for (int i = 0; i < table->Capacity(); i++) {
2597 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2598 if (!MarkCompactCollector::IsMarked(key)) {
2599 table->RemoveEntry(i);
2600 }
2601 }
2602 }
2603 weak_collection_obj = weak_collection->next();
2604 weak_collection->set_next(heap()->undefined_value());
2605 }
2606 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2607}
2608
2609
2610void MarkCompactCollector::AbortWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002611 Object* weak_collection_obj = heap()->encountered_weak_collections();
2612 while (weak_collection_obj != Smi::FromInt(0)) {
2613 JSWeakCollection* weak_collection =
2614 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2615 weak_collection_obj = weak_collection->next();
2616 weak_collection->set_next(heap()->undefined_value());
2617 }
2618 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2619}
2620
2621
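// Processes the list of encountered WeakCells. Cells whose value is dead are
// cleared, with two exceptions handled below: a cell wrapping a Cell whose
// own value is still live is resurrected, and a cell holding a dead map is
// chained into *non_live_map_list while the map's weak dependent code is
// collected into *dependent_code_list.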
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002622void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2623 DependentCode** dependent_code_list) {
2624 Heap* heap = this->heap();
Ben Murdochda12d292016-06-02 14:46:10 +01002625 TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002626 Object* weak_cell_obj = heap->encountered_weak_cells();
2627 Object* the_hole_value = heap->the_hole_value();
2628 DependentCode* dependent_code_head =
2629 DependentCode::cast(heap->empty_fixed_array());
2630 Object* non_live_map_head = Smi::FromInt(0);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002631 while (weak_cell_obj != Smi::FromInt(0)) {
2632 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002633 Object* next_weak_cell = weak_cell->next();
2634 bool clear_value = true;
2635 bool clear_next = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002636 // We do not insert cleared weak cells into the list, so the value
2637 // cannot be a Smi here.
2638 HeapObject* value = HeapObject::cast(weak_cell->value());
2639 if (!MarkCompactCollector::IsMarked(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002640 // Cells for new-space objects embedded in optimized code are wrapped in
2641 // WeakCell and put into Heap::weak_object_to_code_table.
2642 // Such cells do not have any strong references but we want to keep them
2643 // alive as long as the cell value is alive.
2644 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2645 if (value->IsCell()) {
2646 Object* cell_value = Cell::cast(value)->value();
2647 if (cell_value->IsHeapObject() &&
2648 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2649 // Resurrect the cell.
2650 MarkBit mark = Marking::MarkBitFrom(value);
2651 SetMark(value, mark);
2652 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2653 RecordSlot(value, slot, *slot);
2654 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2655 RecordSlot(weak_cell, slot, *slot);
2656 clear_value = false;
2657 }
2658 }
2659 if (value->IsMap()) {
2660 // The map is non-live.
2661 Map* map = Map::cast(value);
2662 // Add dependent code to the dependent_code_list.
2663 DependentCode* candidate = map->dependent_code();
2664 // We rely on the fact that the weak code group comes first.
2665 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2666 if (candidate->length() > 0 &&
2667 candidate->group() == DependentCode::kWeakCodeGroup) {
2668 candidate->set_next_link(dependent_code_head);
2669 dependent_code_head = candidate;
2670 }
2671 // Add the weak cell to the non_live_map list.
2672 weak_cell->set_next(non_live_map_head);
2673 non_live_map_head = weak_cell;
2674 clear_value = false;
2675 clear_next = false;
2676 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002677 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002678 // The value of the weak cell is alive.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002679 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002680 RecordSlot(weak_cell, slot, *slot);
2681 clear_value = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002682 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002683 if (clear_value) {
2684 weak_cell->clear();
2685 }
2686 if (clear_next) {
2687 weak_cell->clear_next(the_hole_value);
2688 }
2689 weak_cell_obj = next_weak_cell;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002690 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002691 heap->set_encountered_weak_cells(Smi::FromInt(0));
2692 *non_live_map_list = non_live_map_head;
2693 *dependent_code_list = dependent_code_head;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002694}
2695
2696
2697void MarkCompactCollector::AbortWeakCells() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002698 Object* the_hole_value = heap()->the_hole_value();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002699 Object* weak_cell_obj = heap()->encountered_weak_cells();
2700 while (weak_cell_obj != Smi::FromInt(0)) {
2701 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2702 weak_cell_obj = weak_cell->next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002703 weak_cell->clear_next(the_hole_value);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002704 }
2705 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2706}
2707
2708
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002709void MarkCompactCollector::AbortTransitionArrays() {
2710 HeapObject* undefined = heap()->undefined_value();
2711 Object* obj = heap()->encountered_transition_arrays();
2712 while (obj != Smi::FromInt(0)) {
2713 TransitionArray* array = TransitionArray::cast(obj);
2714 obj = array->next_link();
2715 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2716 }
2717 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2718}
2719
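// Maps a RelocInfo mode to the typed-slot kind used when recording the slot
// in the OLD_TO_OLD remembered set.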
Ben Murdochda12d292016-06-02 14:46:10 +01002720static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002721 if (RelocInfo::IsCodeTarget(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002722 return CODE_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002723 } else if (RelocInfo::IsCell(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002724 return CELL_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002725 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002726 return EMBEDDED_OBJECT_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002727 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002728 return DEBUG_TARGET_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002729 }
2730 UNREACHABLE();
Ben Murdochda12d292016-06-02 14:46:10 +01002731 return NUMBER_OF_SLOT_TYPES;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002732}
2733
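// Records a typed slot for a relocation entry in |host| whose target lies on
// an evacuation candidate, so the slot can be updated after evacuation.
// Slots living in a constant pool are recorded at the constant pool entry
// address instead of the pc.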
Ben Murdochda12d292016-06-02 14:46:10 +01002734void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2735 Object* target) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002736 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
Ben Murdochda12d292016-06-02 14:46:10 +01002737 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002738 RelocInfo::Mode rmode = rinfo->rmode();
2739 if (target_page->IsEvacuationCandidate() &&
2740 (rinfo->host() == NULL ||
2741 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2742 Address addr = rinfo->pc();
Ben Murdochda12d292016-06-02 14:46:10 +01002743 SlotType slot_type = SlotTypeForRMode(rmode);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002744 if (rinfo->IsInConstantPool()) {
2745 addr = rinfo->constant_pool_entry_address();
2746 if (RelocInfo::IsCodeTarget(rmode)) {
Ben Murdochda12d292016-06-02 14:46:10 +01002747 slot_type = CODE_ENTRY_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002748 } else {
2749 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
Ben Murdochda12d292016-06-02 14:46:10 +01002750 slot_type = OBJECT_SLOT;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002751 }
2752 }
Ben Murdochda12d292016-06-02 14:46:10 +01002753 RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, slot_type, addr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002754 }
2755}
2756
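// Dispatches a recorded typed slot to the matching visitor callback, e.g. a
// CODE_TARGET_SLOT is re-visited through a synthesized CODE_TARGET
// RelocInfo, while an OBJECT_SLOT is visited as a plain pointer.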
Ben Murdochda12d292016-06-02 14:46:10 +01002757static inline void UpdateTypedSlot(Isolate* isolate, ObjectVisitor* v,
2758 SlotType slot_type, Address addr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002759 switch (slot_type) {
Ben Murdochda12d292016-06-02 14:46:10 +01002760 case CODE_TARGET_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002761 RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
2762 rinfo.Visit(isolate, v);
2763 break;
2764 }
Ben Murdochda12d292016-06-02 14:46:10 +01002765 case CELL_TARGET_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002766 RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
2767 rinfo.Visit(isolate, v);
2768 break;
2769 }
Ben Murdochda12d292016-06-02 14:46:10 +01002770 case CODE_ENTRY_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002771 v->VisitCodeEntry(addr);
2772 break;
2773 }
Ben Murdochda12d292016-06-02 14:46:10 +01002774 case RELOCATED_CODE_OBJECT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002775 HeapObject* obj = HeapObject::FromAddress(addr);
2776 Code::BodyDescriptor::IterateBody(obj, v);
2777 break;
2778 }
Ben Murdochda12d292016-06-02 14:46:10 +01002779 case DEBUG_TARGET_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002780 RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
2781 NULL);
2782 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
2783 break;
2784 }
Ben Murdochda12d292016-06-02 14:46:10 +01002785 case EMBEDDED_OBJECT_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002786 RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
2787 rinfo.Visit(isolate, v);
2788 break;
2789 }
Ben Murdochda12d292016-06-02 14:46:10 +01002790 case OBJECT_SLOT: {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002791 v->VisitPointer(reinterpret_cast<Object**>(addr));
2792 break;
2793 }
2794 default:
2795 UNREACHABLE();
2796 break;
2797 }
2798}
2799
2800
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002801// Visitor for updating pointers from live objects in old spaces to new space.
2802// It does not expect to encounter pointers to dead objects.
2803class PointersUpdatingVisitor : public ObjectVisitor {
2804 public:
2805 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2806
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002807 void VisitPointer(Object** p) override { UpdatePointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002808
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002809 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002810 for (Object** p = start; p < end; p++) UpdatePointer(p);
2811 }
2812
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002813 void VisitCell(RelocInfo* rinfo) override {
2814 DCHECK(rinfo->rmode() == RelocInfo::CELL);
2815 Object* cell = rinfo->target_cell();
2816 Object* old_cell = cell;
2817 VisitPointer(&cell);
2818 if (cell != old_cell) {
2819 rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
2820 }
2821 }
2822
2823 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002824 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2825 Object* target = rinfo->target_object();
2826 Object* old_target = target;
2827 VisitPointer(&target);
2828     // Avoid unnecessary changes that might unnecessarily flush the
2829     // instruction cache.
2830 if (target != old_target) {
2831 rinfo->set_target_object(target);
2832 }
2833 }
2834
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002835 void VisitCodeTarget(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002836 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2837 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2838 Object* old_target = target;
2839 VisitPointer(&target);
2840 if (target != old_target) {
2841 rinfo->set_target_address(Code::cast(target)->instruction_start());
2842 }
2843 }
2844
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002845 void VisitCodeAgeSequence(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002846 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2847 Object* stub = rinfo->code_age_stub();
2848 DCHECK(stub != NULL);
2849 VisitPointer(&stub);
2850 if (stub != rinfo->code_age_stub()) {
2851 rinfo->set_code_age_stub(Code::cast(stub));
2852 }
2853 }
2854
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002855 void VisitDebugTarget(RelocInfo* rinfo) override {
2856 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2857 rinfo->IsPatchedDebugBreakSlotSequence());
2858 Object* target =
2859 Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002860 VisitPointer(&target);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002861 rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862 }
2863
2864 static inline void UpdateSlot(Heap* heap, Object** slot) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002865 Object* obj = reinterpret_cast<Object*>(
2866 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002867
2868 if (!obj->IsHeapObject()) return;
2869
2870 HeapObject* heap_obj = HeapObject::cast(obj);
2871
2872 MapWord map_word = heap_obj->map_word();
2873 if (map_word.IsForwardingAddress()) {
2874 DCHECK(heap->InFromSpace(heap_obj) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002875 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2876 Page::FromAddress(heap_obj->address())
2877 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002878 HeapObject* target = map_word.ToForwardingAddress();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002879 base::NoBarrier_CompareAndSwap(
2880 reinterpret_cast<base::AtomicWord*>(slot),
2881 reinterpret_cast<base::AtomicWord>(obj),
2882 reinterpret_cast<base::AtomicWord>(target));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002883 DCHECK(!heap->InFromSpace(target) &&
2884 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2885 }
2886 }
2887
2888 private:
2889 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2890
2891 Heap* heap_;
2892};
2893
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002894static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2895 Object** p) {
2896 MapWord map_word = HeapObject::cast(*p)->map_word();
2897
2898 if (map_word.IsForwardingAddress()) {
2899 return String::cast(map_word.ToForwardingAddress());
2900 }
2901
2902 return String::cast(*p);
2903}
2904
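// Returns true if |slot| lies inside a black (live) object on page |p|. The
// check scans the marking bitmap backwards from the slot to the closest
// preceding mark bit and compares the slot against that object's extent.
// Slots pointing at the first word of an object are treated as invalid
// because left-trimming moves object headers.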
Ben Murdochda12d292016-06-02 14:46:10 +01002905bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002906 Space* owner = p->owner();
Ben Murdochda12d292016-06-02 14:46:10 +01002907 DCHECK(owner != heap_->lo_space() && owner != nullptr);
2908 USE(owner);
2909
2910   // If we are on a black page, we cannot easily find the actual object
2911   // start, so we conservatively return true.
2912 if (p->IsFlagSet(Page::BLACK_PAGE)) {
2913 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002914 }
2915
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002916 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2917 unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2918 MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2919 MarkBit::CellType* cells = p->markbits()->cells();
2920 Address base_address = p->area_start();
2921 unsigned int base_address_cell_index = Bitmap::IndexToCell(
2922 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
2923
2924 // Check if the slot points to the start of an object. This can happen e.g.
2925 // when we left trim a fixed array. Such slots are invalid and we can remove
2926 // them.
2927 if (index_mask > 1) {
2928 if ((cells[cell_index] & index_mask) != 0 &&
2929 (cells[cell_index] & (index_mask >> 1)) == 0) {
2930 return false;
2931 }
2932 } else {
2933 // Left trimming moves the mark bits so we cannot be in the very first cell.
2934 DCHECK(cell_index != base_address_cell_index);
2935 if ((cells[cell_index] & index_mask) != 0 &&
2936 (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2937 return false;
2938 }
2939 }
2940
2941 // Check if the object is in the current cell.
2942 MarkBit::CellType slot_mask;
2943 if ((cells[cell_index] == 0) ||
2944 (base::bits::CountTrailingZeros32(cells[cell_index]) >
2945 base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2946 // If we are already in the first cell, there is no live object.
2947 if (cell_index == base_address_cell_index) return false;
2948
2949     // If not, walk backwards to a preceding cell that has a mark bit set.
2950 do {
2951 cell_index--;
2952 } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2953
2954 // The slot must be in a dead object if there are no preceding cells that
2955 // have mark bits set.
2956 if (cells[cell_index] == 0) {
2957 return false;
2958 }
2959
2960 // The object is in a preceding cell. Set the mask to find any object.
2961 slot_mask = ~0u;
2962 } else {
2963 // We are interested in object mark bits right before the slot.
2964 slot_mask = index_mask + (index_mask - 1);
2965 }
2966
2967 MarkBit::CellType current_cell = cells[cell_index];
2968 CHECK(current_cell != 0);
2969
2970 // Find the last live object in the cell.
2971 unsigned int leading_zeros =
2972 base::bits::CountLeadingZeros32(current_cell & slot_mask);
2973 CHECK(leading_zeros != Bitmap::kBitsPerCell);
2974 int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2975
2976 base_address += (cell_index - base_address_cell_index) *
2977 Bitmap::kBitsPerCell * kPointerSize;
2978 Address address = base_address + offset * kPointerSize;
2979 HeapObject* object = HeapObject::FromAddress(address);
2980 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2981 CHECK(object->address() < reinterpret_cast<Address>(slot));
2982 if ((object->address() + kPointerSize) <= slot &&
2983 (object->address() + object->Size()) > slot) {
2984 // If the slot is within the last found object in the cell, the slot is
2985 // in a live object.
2986 // Slots pointing to the first word of an object are invalid and removed.
2987 // This can happen when we move the object header while left trimming.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002988 return true;
2989 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002990 return false;
2991}
2992
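// Slow path that returns the black object containing |slot|, or nullptr if
// the slot is not inside a live object. Large objects are looked up via the
// large object space, black pages are scanned with a HeapObjectIterator, and
// all other pages use a LiveObjectIterator over the marking bitmap.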
Ben Murdochda12d292016-06-02 14:46:10 +01002993HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
2994 Page* p = Page::FromAddress(slot);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002995 Space* owner = p->owner();
Ben Murdochda12d292016-06-02 14:46:10 +01002996 if (owner == heap_->lo_space() || owner == nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002997 Object* large_object = heap_->lo_space()->FindObject(slot);
2998 // This object has to exist, otherwise we would not have recorded a slot
2999 // for it.
3000 CHECK(large_object->IsHeapObject());
3001 HeapObject* large_heap_object = HeapObject::cast(large_object);
Ben Murdochda12d292016-06-02 14:46:10 +01003002
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003003 if (IsMarked(large_heap_object)) {
Ben Murdochda12d292016-06-02 14:46:10 +01003004 return large_heap_object;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003005 }
Ben Murdochda12d292016-06-02 14:46:10 +01003006 return nullptr;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003007 }
3008
Ben Murdochda12d292016-06-02 14:46:10 +01003009 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3010 HeapObjectIterator it(p);
3011 HeapObject* object = nullptr;
3012 while ((object = it.Next()) != nullptr) {
3013 int size = object->Size();
3014 if (object->address() > slot) return nullptr;
3015 if (object->address() <= slot && slot < (object->address() + size)) {
3016 return object;
3017 }
3018 }
3019 } else {
3020 LiveObjectIterator<kBlackObjects> it(p);
3021 HeapObject* object = nullptr;
3022 while ((object = it.Next()) != nullptr) {
3023 int size = object->Size();
3024 if (object->address() > slot) return nullptr;
3025 if (object->address() <= slot && slot < (object->address() + size)) {
3026 return object;
3027 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003028 }
3029 }
Ben Murdochda12d292016-06-02 14:46:10 +01003030 return nullptr;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003031}
3032
3033
3034void MarkCompactCollector::EvacuateNewSpacePrologue() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003035 NewSpace* new_space = heap()->new_space();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003036 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3037 // Append the list of new space pages to be processed.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003038 while (it.has_next()) {
3039 newspace_evacuation_candidates_.Add(it.next());
3040 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003041 new_space->Flip();
3042 new_space->ResetAllocationInfo();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003043}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003044
Ben Murdoch097c5b22016-05-18 11:27:45 +01003045void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3046 newspace_evacuation_candidates_.Rewind(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003047}
3048
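// An Evacuator processes one page at a time: new-space pages are evacuated
// object by object or, above PageEvacuationThreshold(), promoted wholesale
// to old space, while old-space evacuation candidates are compacted into the
// evacuator's local CompactionSpaceCollection. Evacuators are run from
// parallel tasks; Finalize() merges their results on the main thread.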
Ben Murdoch097c5b22016-05-18 11:27:45 +01003049class MarkCompactCollector::Evacuator : public Malloced {
3050 public:
Ben Murdochc5610432016-08-08 18:44:38 +01003051 // NewSpacePages with more live bytes than this threshold qualify for fast
3052 // evacuation.
3053 static int PageEvacuationThreshold() {
3054 if (FLAG_page_promotion)
3055 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3056 return Page::kAllocatableMemory + kPointerSize;
3057 }
3058
Ben Murdochda12d292016-06-02 14:46:10 +01003059 explicit Evacuator(MarkCompactCollector* collector)
Ben Murdoch097c5b22016-05-18 11:27:45 +01003060 : collector_(collector),
Ben Murdoch097c5b22016-05-18 11:27:45 +01003061 compaction_spaces_(collector->heap()),
Ben Murdoch097c5b22016-05-18 11:27:45 +01003062 local_pretenuring_feedback_(HashMap::PointersMatch,
3063 kInitialLocalPretenuringFeedbackCapacity),
3064 new_space_visitor_(collector->heap(), &compaction_spaces_,
Ben Murdoch097c5b22016-05-18 11:27:45 +01003065 &local_pretenuring_feedback_),
Ben Murdochc5610432016-08-08 18:44:38 +01003066 new_space_page_visitor(),
Ben Murdochda12d292016-06-02 14:46:10 +01003067 old_space_visitor_(collector->heap(), &compaction_spaces_),
Ben Murdoch097c5b22016-05-18 11:27:45 +01003068 duration_(0.0),
Ben Murdochda12d292016-06-02 14:46:10 +01003069 bytes_compacted_(0) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003070
Ben Murdochc5610432016-08-08 18:44:38 +01003071 inline bool EvacuatePage(Page* chunk);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003072
3073 // Merge back locally cached info sequentially. Note that this method needs
3074 // to be called from the main thread.
3075 inline void Finalize();
3076
3077 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3078
Ben Murdoch097c5b22016-05-18 11:27:45 +01003079 private:
Ben Murdochc5610432016-08-08 18:44:38 +01003080 enum EvacuationMode {
3081 kObjectsNewToOld,
3082 kPageNewToOld,
3083 kObjectsOldToOld,
3084 };
3085
Ben Murdoch097c5b22016-05-18 11:27:45 +01003086 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3087
Ben Murdochc5610432016-08-08 18:44:38 +01003088 inline Heap* heap() { return collector_->heap(); }
3089
3090 inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3091 // Note: The order of checks is important in this function.
3092 if (chunk->InNewSpace()) return kObjectsNewToOld;
3093 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3094 return kPageNewToOld;
3095 DCHECK(chunk->IsEvacuationCandidate());
3096 return kObjectsOldToOld;
3097 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003098
3099 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3100 duration_ += duration;
3101 bytes_compacted_ += bytes_compacted;
3102 }
3103
Ben Murdochc5610432016-08-08 18:44:38 +01003104 template <IterationMode mode, class Visitor>
3105 inline bool EvacuateSinglePage(Page* p, Visitor* visitor);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003106
3107 MarkCompactCollector* collector_;
3108
Ben Murdoch097c5b22016-05-18 11:27:45 +01003109 // Locally cached collector data.
3110 CompactionSpaceCollection compaction_spaces_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003111 HashMap local_pretenuring_feedback_;
3112
Ben Murdochda12d292016-06-02 14:46:10 +01003113 // Visitors for the corresponding spaces.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003114 EvacuateNewSpaceVisitor new_space_visitor_;
Ben Murdochc5610432016-08-08 18:44:38 +01003115 EvacuateNewSpacePageVisitor new_space_page_visitor;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003116 EvacuateOldSpaceVisitor old_space_visitor_;
3117
3118   // Bookkeeping info.
3119 double duration_;
3120 intptr_t bytes_compacted_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003121};
3122
Ben Murdochc5610432016-08-08 18:44:38 +01003123template <MarkCompactCollector::IterationMode mode, class Visitor>
3124bool MarkCompactCollector::Evacuator::EvacuateSinglePage(Page* p,
3125 Visitor* visitor) {
Ben Murdochda12d292016-06-02 14:46:10 +01003126 bool success = false;
Ben Murdochc5610432016-08-08 18:44:38 +01003127 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
3128 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
Ben Murdochda12d292016-06-02 14:46:10 +01003129 int saved_live_bytes = p->LiveBytes();
3130 double evacuation_time;
3131 {
3132 AlwaysAllocateScope always_allocate(heap()->isolate());
3133 TimedScope timed_scope(&evacuation_time);
Ben Murdochc5610432016-08-08 18:44:38 +01003134 success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
Ben Murdochda12d292016-06-02 14:46:10 +01003135 }
3136 if (FLAG_trace_evacuation) {
Ben Murdochc5610432016-08-08 18:44:38 +01003137 const char age_mark_tag =
3138 !p->InNewSpace()
3139 ? 'x'
3140 : !p->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)
3141 ? '>'
3142 : !p->ContainsLimit(heap()->new_space()->age_mark()) ? '<'
3143 : '#';
Ben Murdochda12d292016-06-02 14:46:10 +01003144 PrintIsolate(heap()->isolate(),
Ben Murdochc5610432016-08-08 18:44:38 +01003145 "evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
3146 "page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
3147 this, p, p->InNewSpace(), age_mark_tag,
3148 p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
Ben Murdochda12d292016-06-02 14:46:10 +01003149 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
3150 evacuation_time);
3151 }
3152 if (success) {
3153 ReportCompactionProgress(evacuation_time, saved_live_bytes);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003154 }
3155 return success;
3156}
3157
Ben Murdochc5610432016-08-08 18:44:38 +01003158bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3159 bool result = false;
3160 DCHECK(page->SweepingDone());
3161 switch (ComputeEvacuationMode(page)) {
3162 case kObjectsNewToOld:
3163 result = EvacuateSinglePage<kClearMarkbits>(page, &new_space_visitor_);
3164 DCHECK(result);
3165 USE(result);
3166 break;
3167 case kPageNewToOld:
3168 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor);
3169 DCHECK(result);
3170 USE(result);
3171 break;
3172 case kObjectsOldToOld:
3173 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_);
3174 if (!result) {
3175 // Aborted compaction page. We can record slots here to have them
3176 // processed in parallel later on.
3177 EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity());
3178 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor);
3179 DCHECK(result);
3180 USE(result);
3181 // We need to return failure here to indicate that we want this page
3182 // added to the sweeper.
3183 return false;
3184 }
3185 break;
3186 default:
3187 UNREACHABLE();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003188 }
Ben Murdochc5610432016-08-08 18:44:38 +01003189 return result;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003190}
3191
3192void MarkCompactCollector::Evacuator::Finalize() {
3193 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3194 heap()->code_space()->MergeCompactionSpace(
3195 compaction_spaces_.Get(CODE_SPACE));
3196 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
Ben Murdochc5610432016-08-08 18:44:38 +01003197 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3198 new_space_page_visitor.promoted_size());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003199 heap()->IncrementSemiSpaceCopiedObjectSize(
3200 new_space_visitor_.semispace_copied_size());
3201 heap()->IncrementYoungSurvivorsCounter(
3202 new_space_visitor_.promoted_size() +
Ben Murdochc5610432016-08-08 18:44:38 +01003203 new_space_visitor_.semispace_copied_size() +
3204 new_space_page_visitor.promoted_size());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003205 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003206}
3207
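// Estimates the number of compaction tasks from the profiled compaction
// speed, aiming at roughly kTargetCompactionTimeInMs of work per task and
// capping the result at the number of pages and the available background
// cores. For example, with 40 MB of live bytes and a measured speed of
// 10 MB/ms the formula below yields 1 + 40 / 10 / 1 = 5 tasks before capping.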
Ben Murdoch097c5b22016-05-18 11:27:45 +01003208int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3209 intptr_t live_bytes) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003210 if (!FLAG_parallel_compaction) return 1;
3211 // Compute the number of needed tasks based on a target compaction time, the
3212 // profiled compaction speed and marked live memory.
3213 //
3214 // The number of parallel compaction tasks is limited by:
3215 // - #evacuation pages
3216 // - (#cores - 1)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003217 const double kTargetCompactionTimeInMs = 1;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003218 const int kNumSweepingTasks = 3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003219
Ben Murdochda12d292016-06-02 14:46:10 +01003220 double compaction_speed =
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003221 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003222
Ben Murdochda12d292016-06-02 14:46:10 +01003223 const int available_cores = Max(
3224 1, static_cast<int>(
3225 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
3226 kNumSweepingTasks - 1);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003227 int tasks;
3228 if (compaction_speed > 0) {
Ben Murdochda12d292016-06-02 14:46:10 +01003229 tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
3230 kTargetCompactionTimeInMs);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003231 } else {
3232 tasks = pages;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003233 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003234 const int tasks_capped_pages = Min(pages, tasks);
3235 return Min(available_cores, tasks_capped_pages);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003236}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003237
Ben Murdochda12d292016-06-02 14:46:10 +01003238class EvacuationJobTraits {
3239 public:
3240 typedef int* PerPageData; // Pointer to number of aborted pages.
3241 typedef MarkCompactCollector::Evacuator* PerTaskData;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003242
Ben Murdochda12d292016-06-02 14:46:10 +01003243 static const bool NeedSequentialFinalization = true;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003244
Ben Murdochda12d292016-06-02 14:46:10 +01003245 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3246 MemoryChunk* chunk, PerPageData) {
Ben Murdochc5610432016-08-08 18:44:38 +01003247 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003248 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003249
Ben Murdochc5610432016-08-08 18:44:38 +01003250 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3251 bool success, PerPageData data) {
Ben Murdochda12d292016-06-02 14:46:10 +01003252 if (chunk->InNewSpace()) {
3253 DCHECK(success);
Ben Murdochc5610432016-08-08 18:44:38 +01003254 } else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3255 DCHECK(success);
3256 Page* p = static_cast<Page*>(chunk);
3257 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3258 p->ForAllFreeListCategories(
3259 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3260 heap->mark_compact_collector()->sweeper().AddLatePage(
3261 p->owner()->identity(), p);
Ben Murdochda12d292016-06-02 14:46:10 +01003262 } else {
3263 Page* p = static_cast<Page*>(chunk);
3264 if (success) {
3265 DCHECK(p->IsEvacuationCandidate());
3266 DCHECK(p->SweepingDone());
3267 p->Unlink();
3268 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003269 // We have partially compacted the page, i.e., some objects may have
3270 // moved, others are still in place.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003271 p->SetFlag(Page::COMPACTION_WAS_ABORTED);
Ben Murdochc5610432016-08-08 18:44:38 +01003272 p->ClearEvacuationCandidate();
3273 // Slots have already been recorded so we just need to add it to the
3274 // sweeper.
Ben Murdochda12d292016-06-02 14:46:10 +01003275 *data += 1;
3276 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003277 }
3278 }
Ben Murdochda12d292016-06-02 14:46:10 +01003279};
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003280
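// Builds a PageParallelJob over all old-space evacuation candidates and
// new-space pages, hands qualifying new-space pages to
// EvacuateNewSpacePageVisitor::TryMoveToOldSpace for wholesale promotion,
// and runs one Evacuator per task. Old-space pages whose evacuation was
// aborted are counted through the per-page data and re-swept later.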
Ben Murdochda12d292016-06-02 14:46:10 +01003281void MarkCompactCollector::EvacuatePagesInParallel() {
3282 PageParallelJob<EvacuationJobTraits> job(
Ben Murdochc5610432016-08-08 18:44:38 +01003283 heap_, heap_->isolate()->cancelable_task_manager(),
3284 &page_parallel_job_semaphore_);
Ben Murdochda12d292016-06-02 14:46:10 +01003285
3286 int abandoned_pages = 0;
3287 intptr_t live_bytes = 0;
3288 for (Page* page : evacuation_candidates_) {
3289 live_bytes += page->LiveBytes();
3290 job.AddPage(page, &abandoned_pages);
3291 }
Ben Murdochc5610432016-08-08 18:44:38 +01003292
3293 const Address age_mark = heap()->new_space()->age_mark();
3294 for (Page* page : newspace_evacuation_candidates_) {
Ben Murdochda12d292016-06-02 14:46:10 +01003295 live_bytes += page->LiveBytes();
Ben Murdochc5610432016-08-08 18:44:38 +01003296 if (!page->NeverEvacuate() &&
3297 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3298 page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
3299 !page->Contains(age_mark)) {
3300 EvacuateNewSpacePageVisitor::TryMoveToOldSpace(page, heap()->old_space());
3301 }
Ben Murdochda12d292016-06-02 14:46:10 +01003302 job.AddPage(page, &abandoned_pages);
3303 }
3304 DCHECK_GE(job.NumberOfPages(), 1);
3305
3306 // Used for trace summary.
3307 double compaction_speed = 0;
3308 if (FLAG_trace_evacuation) {
3309 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3310 }
3311
3312 const int wanted_num_tasks =
3313 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
3314 Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
3315 for (int i = 0; i < wanted_num_tasks; i++) {
3316 evacuators[i] = new Evacuator(this);
3317 }
3318 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
3319 for (int i = 0; i < wanted_num_tasks; i++) {
3320 evacuators[i]->Finalize();
3321 delete evacuators[i];
3322 }
3323 delete[] evacuators;
3324
3325 if (FLAG_trace_evacuation) {
Ben Murdochc5610432016-08-08 18:44:38 +01003326 PrintIsolate(isolate(),
3327 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
3328 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3329 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3330 isolate()->time_millis_since_init(),
3331 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
3332 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
3333 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3334 live_bytes, compaction_speed);
Ben Murdochda12d292016-06-02 14:46:10 +01003335 }
3336}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003337
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003338class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3339 public:
3340 virtual Object* RetainAs(Object* object) {
3341 if (object->IsHeapObject()) {
3342 HeapObject* heap_object = HeapObject::cast(object);
3343 MapWord map_word = heap_object->map_word();
3344 if (map_word.IsForwardingAddress()) {
3345 return map_word.ToForwardingAddress();
3346 }
3347 }
3348 return object;
3349 }
3350};
3351
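// Sweeps a single page: the gaps between black objects are returned to the
// space's free list (optionally zapped with 0xcc), live objects are
// optionally revisited and the code-space skip list rebuilt, and the page's
// mark bits and live-byte count are reset. Returns the largest freed block,
// filtered through FreeList::GuaranteedAllocatable().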
Ben Murdochc5610432016-08-08 18:44:38 +01003352template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
3353 MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
3354 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
3355 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
3356int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3357 ObjectVisitor* v) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003358 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
Ben Murdochda12d292016-06-02 14:46:10 +01003359 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003360 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3361 space->identity() == CODE_SPACE);
3362 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
Ben Murdochc5610432016-08-08 18:44:38 +01003363 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003364
3365 Address free_start = p->area_start();
3366 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003367
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003368 // If we use the skip list for code space pages, we have to lock the skip
3369 // list because it could be accessed concurrently by the runtime or the
3370 // deoptimizer.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003371 SkipList* skip_list = p->skip_list();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003372 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3373 skip_list->Clear();
3374 }
3375
3376 intptr_t freed_bytes = 0;
3377 intptr_t max_freed_bytes = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003378 int curr_region = -1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003379
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003380 LiveObjectIterator<kBlackObjects> it(p);
3381 HeapObject* object = NULL;
3382 while ((object = it.Next()) != NULL) {
3383 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3384 Address free_end = object->address();
3385 if (free_end != free_start) {
3386 int size = static_cast<int>(free_end - free_start);
3387 if (free_space_mode == ZAP_FREE_SPACE) {
3388 memset(free_start, 0xcc, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003389 }
Ben Murdochda12d292016-06-02 14:46:10 +01003390 freed_bytes = space->UnaccountedFree(free_start, size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003391 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003392 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003393 Map* map = object->synchronized_map();
3394 int size = object->SizeFromMap(map);
3395 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3396 object->IterateBody(map->instance_type(), size, v);
3397 }
3398 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3399 int new_region_start = SkipList::RegionNumber(free_end);
3400 int new_region_end =
3401 SkipList::RegionNumber(free_end + size - kPointerSize);
3402 if (new_region_start != curr_region || new_region_end != curr_region) {
3403 skip_list->AddObject(free_end, size);
3404 curr_region = new_region_end;
3405 }
3406 }
3407 free_start = free_end + size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003408 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003409
3410 // Clear the mark bits of that page and reset live bytes count.
3411 Bitmap::Clear(p);
3412
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003413 if (free_start != p->area_end()) {
3414 int size = static_cast<int>(p->area_end() - free_start);
3415 if (free_space_mode == ZAP_FREE_SPACE) {
3416 memset(free_start, 0xcc, size);
3417 }
Ben Murdochda12d292016-06-02 14:46:10 +01003418 freed_bytes = space->UnaccountedFree(free_start, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003419 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003420 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003421 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003422 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3423}
3424
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003425void MarkCompactCollector::InvalidateCode(Code* code) {
3426 if (heap_->incremental_marking()->IsCompacting() &&
3427 !ShouldSkipEvacuationSlotRecording(code)) {
3428 DCHECK(compacting_);
3429
3430     // If the object is white, then no slots were recorded on it yet.
3431 MarkBit mark_bit = Marking::MarkBitFrom(code);
3432 if (Marking::IsWhite(mark_bit)) return;
3433
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003434 // Ignore all slots that might have been recorded in the body of the
3435 // deoptimized code object. Assumption: no slots will be recorded for
3436 // this object after invalidating it.
Ben Murdochda12d292016-06-02 14:46:10 +01003437 Page* page = Page::FromAddress(code->address());
3438 Address start = code->instruction_start();
3439 Address end = code->address() + code->Size();
3440 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003441 }
3442}
3443
3444
3445// Return true if the given code is deoptimized or will be deoptimized.
3446bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3447 return code->is_optimized_code() && code->marked_for_deoptimization();
3448}
3449
3450
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003451#ifdef VERIFY_HEAP
3452static void VerifyAllBlackObjects(MemoryChunk* page) {
3453 LiveObjectIterator<kAllLiveObjects> it(page);
3454 HeapObject* object = NULL;
3455 while ((object = it.Next()) != NULL) {
3456 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3457 }
3458}
3459#endif // VERIFY_HEAP
3460
Ben Murdochc5610432016-08-08 18:44:38 +01003461template <class Visitor>
3462bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003463 IterationMode mode) {
3464#ifdef VERIFY_HEAP
3465 VerifyAllBlackObjects(page);
3466#endif // VERIFY_HEAP
3467
3468 LiveObjectIterator<kBlackObjects> it(page);
3469 HeapObject* object = nullptr;
3470 while ((object = it.Next()) != nullptr) {
3471 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3472 if (!visitor->Visit(object)) {
3473 if (mode == kClearMarkbits) {
3474 page->markbits()->ClearRange(
3475 page->AddressToMarkbitIndex(page->area_start()),
3476 page->AddressToMarkbitIndex(object->address()));
Ben Murdoch097c5b22016-05-18 11:27:45 +01003477 if (page->old_to_new_slots() != nullptr) {
3478 page->old_to_new_slots()->RemoveRange(
3479 0, static_cast<int>(object->address() - page->address()));
3480 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003481 RecomputeLiveBytes(page);
3482 }
3483 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003484 }
3485 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003486 if (mode == kClearMarkbits) {
3487 Bitmap::Clear(page);
3488 }
3489 return true;
3490}
3491
3492
3493void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3494 LiveObjectIterator<kBlackObjects> it(page);
3495 int new_live_size = 0;
3496 HeapObject* object = nullptr;
3497 while ((object = it.Next()) != nullptr) {
3498 new_live_size += object->Size();
3499 }
3500 page->SetLiveBytes(new_live_size);
3501}
3502
3503
3504void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3505 ObjectVisitor* visitor) {
3506#ifdef VERIFY_HEAP
3507 VerifyAllBlackObjects(page);
3508#endif // VERIFY_HEAP
3509
3510 LiveObjectIterator<kBlackObjects> it(page);
3511 HeapObject* object = NULL;
3512 while ((object = it.Next()) != NULL) {
3513 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3514 Map* map = object->synchronized_map();
3515 int size = object->SizeFromMap(map);
3516 object->IterateBody(map->instance_type(), size, visitor);
3517 }
3518}
3519
Ben Murdochc5610432016-08-08 18:44:38 +01003520void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
3521 Page* page) {
3522 base::LockGuard<base::Mutex> guard(&mutex_);
3523 swept_list_[space->identity()].Add(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003524}
3525
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003526void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Ben Murdochda12d292016-06-02 14:46:10 +01003527 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003528 Heap::RelocationLock relocation_lock(heap());
3529
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003530 {
Ben Murdochda12d292016-06-02 14:46:10 +01003531 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003532 EvacuationScope evacuation_scope(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003533
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003534 EvacuateNewSpacePrologue();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003535 EvacuatePagesInParallel();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003536 EvacuateNewSpaceEpilogue();
3537 heap()->new_space()->set_age_mark(heap()->new_space()->top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003538 }
3539
3540 UpdatePointersAfterEvacuation();
3541
Ben Murdoch097c5b22016-05-18 11:27:45 +01003542 // Give pages that are queued to be freed back to the OS. Note that filtering
3543 // slots only handles old space (for unboxed doubles), and thus map space can
3544 // still contain stale pointers. We only free the chunks after pointer updates
3545 // to still have access to page headers.
Ben Murdochc5610432016-08-08 18:44:38 +01003546 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003547
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003548 {
Ben Murdochda12d292016-06-02 14:46:10 +01003549 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
Ben Murdochc5610432016-08-08 18:44:38 +01003550
3551 for (Page* p : evacuation_candidates_) {
3552 // Important: skip list should be cleared only after roots were updated
3553 // because root iteration traverses the stack and might have to find
3554       // code objects from a not-yet-updated pc pointing into an evacuation candidate.
3555 SkipList* list = p->skip_list();
3556 if (list != NULL) list->Clear();
3557 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3558 sweeper().AddLatePage(p->owner()->identity(), p);
3559 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3560 }
3561 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003562
3563 // EvacuateNewSpaceAndCandidates iterates over new space objects and for
3564 // ArrayBuffers either re-registers them as live or promotes them. This is
3565 // needed to properly free them.
3566 heap()->array_buffer_tracker()->FreeDead(false);
3567
3568 // Deallocate evacuated candidate pages.
3569 ReleaseEvacuationCandidates();
3570 }
3571
3572#ifdef VERIFY_HEAP
Ben Murdochc5610432016-08-08 18:44:38 +01003573 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003574 VerifyEvacuation(heap());
3575 }
3576#endif
3577}
3578
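// Job traits for updating recorded slots in parallel. OLD_TO_NEW slots are
// visited through RememberedSet::IterateWithWrapper with UpdateOldToNewSlot;
// OLD_TO_OLD slots, both untyped and typed, are updated via the
// PointersUpdatingVisitor and removed from the remembered set afterwards.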
Ben Murdochda12d292016-06-02 14:46:10 +01003579template <PointerDirection direction>
3580class PointerUpdateJobTraits {
3581 public:
3582 typedef int PerPageData; // Per page data is not used in this job.
3583 typedef PointersUpdatingVisitor* PerTaskData;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003584
Ben Murdochda12d292016-06-02 14:46:10 +01003585 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3586 MemoryChunk* chunk, PerPageData) {
3587 UpdateUntypedPointers(heap, chunk);
3588 UpdateTypedPointers(heap, chunk, visitor);
3589 return true;
3590 }
3591 static const bool NeedSequentialFinalization = false;
3592 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003593 }
3594
Ben Murdochda12d292016-06-02 14:46:10 +01003595 private:
3596 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
3597 if (direction == OLD_TO_NEW) {
3598 RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap, chunk,
3599 UpdateOldToNewSlot);
3600 } else {
3601 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [heap](Address slot) {
3602 PointersUpdatingVisitor::UpdateSlot(heap,
3603 reinterpret_cast<Object**>(slot));
3604 return REMOVE_SLOT;
3605 });
3606 }
3607 }
3608
3609 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk,
3610 PointersUpdatingVisitor* visitor) {
3611 if (direction == OLD_TO_OLD) {
3612 Isolate* isolate = heap->isolate();
3613 RememberedSet<OLD_TO_OLD>::IterateTyped(
3614 chunk, [isolate, visitor](SlotType type, Address slot) {
3615 UpdateTypedSlot(isolate, visitor, type, slot);
3616 return REMOVE_SLOT;
3617 });
3618 }
3619 }
3620
3621 static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
3622 MapWord map_word = object->map_word();
Ben Murdochc5610432016-08-08 18:44:38 +01003623 // There could still be stale pointers in large object space, map space,
3624 // and old space for pages that have been promoted.
Ben Murdochda12d292016-06-02 14:46:10 +01003625 if (map_word.IsForwardingAddress()) {
3626 // Update the corresponding slot.
3627 *address = map_word.ToForwardingAddress();
3628 }
3629 }
3630};
3631
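// Caps pointer-update parallelism at kMaxTasks, with one task per
// kPagesPerTask pages (rounded up); e.g. 10 pages yield
// Min(4, (10 + 3) / 4) = 3 tasks.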
3632int NumberOfPointerUpdateTasks(int pages) {
3633 if (!FLAG_parallel_pointer_update) return 1;
3634 const int kMaxTasks = 4;
3635 const int kPagesPerTask = 4;
3636 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
3637}
3638
3639template <PointerDirection direction>
Ben Murdochc5610432016-08-08 18:44:38 +01003640void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
Ben Murdochda12d292016-06-02 14:46:10 +01003641 PageParallelJob<PointerUpdateJobTraits<direction> > job(
Ben Murdochc5610432016-08-08 18:44:38 +01003642 heap, heap->isolate()->cancelable_task_manager(), semaphore);
Ben Murdochda12d292016-06-02 14:46:10 +01003643 RememberedSet<direction>::IterateMemoryChunks(
3644 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
3645 PointersUpdatingVisitor visitor(heap);
3646 int num_pages = job.NumberOfPages();
3647 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
3648 job.Run(num_tasks, [&visitor](int i) { return &visitor; });
3649}
3650
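// Job traits for updating pointers inside to-space objects: each new-space
// page is walked linearly between the recorded start/end limits and every
// object body is iterated with the PointersUpdatingVisitor.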
3651class ToSpacePointerUpdateJobTraits {
3652 public:
3653 typedef std::pair<Address, Address> PerPageData;
3654 typedef PointersUpdatingVisitor* PerTaskData;
3655
3656 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3657 MemoryChunk* chunk, PerPageData limits) {
3658 for (Address cur = limits.first; cur < limits.second;) {
3659 HeapObject* object = HeapObject::FromAddress(cur);
3660 Map* map = object->map();
3661 int size = object->SizeFromMap(map);
3662 object->IterateBody(map->instance_type(), size, visitor);
3663 cur += size;
3664 }
3665 return true;
3666 }
3667 static const bool NeedSequentialFinalization = false;
3668 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3669 }
3670};
3671
Ben Murdochc5610432016-08-08 18:44:38 +01003672void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
Ben Murdochda12d292016-06-02 14:46:10 +01003673 PageParallelJob<ToSpacePointerUpdateJobTraits> job(
Ben Murdochc5610432016-08-08 18:44:38 +01003674 heap, heap->isolate()->cancelable_task_manager(), semaphore);
Ben Murdochda12d292016-06-02 14:46:10 +01003675 Address space_start = heap->new_space()->bottom();
3676 Address space_end = heap->new_space()->top();
3677 NewSpacePageIterator it(space_start, space_end);
3678 while (it.has_next()) {
Ben Murdochc5610432016-08-08 18:44:38 +01003679 Page* page = it.next();
Ben Murdochda12d292016-06-02 14:46:10 +01003680 Address start =
3681 page->Contains(space_start) ? space_start : page->area_start();
3682 Address end = page->Contains(space_end) ? space_end : page->area_end();
3683 job.AddPage(page, std::make_pair(start, end));
3684 }
3685 PointersUpdatingVisitor visitor(heap);
3686 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
3687 job.Run(num_tasks, [&visitor](int i) { return &visitor; });
3688}
3689
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor(heap());

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
    UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
    UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
  }

  {
    Heap* heap = this->heap();
    TRACE_GC(heap->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
    UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
    // Update pointers from external string table.
    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}

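// Returns fully processed evacuation candidate pages to their owning spaces,
// resets the candidate list, and lets the unmapper free any memory chunks
// that were queued for release.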
void MarkCompactCollector::ReleaseEvacuationCandidates() {
  for (Page* p : evacuation_candidates_) {
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    p->ResetLiveBytes();
    CHECK(p->SweepingDone());
    space->ReleasePage(p);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

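// Sweeps pages of |identity| taken from the shared sweeping list until either
// a swept page yields at least |required_freed_bytes| or |max_pages| pages
// have been processed; a value of 0 disables the respective limit. Returns
// the largest block freed on any swept page, as reported by RawSweep.
// Illustrative call only (not a call site in this file), assuming a Sweeper&
// named sweeper and a caller-chosen byte count size_in_bytes:
//   int freed = sweeper.ParallelSweepSpace(OLD_SPACE, size_in_bytes, 0);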
int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
                                                      int required_freed_bytes,
                                                      int max_pages) {
  int max_freed = 0;
  int pages_freed = 0;
  Page* page = nullptr;
  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
    int freed = ParallelSweepPage(page, heap_->paged_space(identity));
    pages_freed += 1;
    DCHECK_GE(freed, 0);
    max_freed = Max(max_freed, freed);
    if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
      return max_freed;
    if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
  }
  return max_freed;
}

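// Sweeps a single page under the page mutex so concurrent sweepers do not
// race: a page whose state is no longer kSweepingPending was already swept
// and is skipped. CODE_SPACE pages rebuild their skip list; other spaces
// ignore it. The swept page is then recorded in swept_list_ under mutex_.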
int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
                                                     PagedSpace* space) {
  int max_freed = 0;
  if (page->mutex()->TryLock()) {
    // If this page was already swept in the meantime, we can return here.
    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
      page->mutex()->Unlock();
      return 0;
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    if (space->identity() == OLD_SPACE) {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                           IGNORE_FREE_SPACE>(space, page, NULL);
    } else if (space->identity() == CODE_SPACE) {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
                           IGNORE_FREE_SPACE>(space, page, NULL);
    } else {
      max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                           IGNORE_FREE_SPACE>(space, page, NULL);
    }
    {
      base::LockGuard<base::Mutex> guard(&mutex_);
      swept_list_[space->identity()].Add(page);
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
    page->mutex()->Unlock();
  }
  return max_freed;
}

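// Registers a page for sweeping before concurrent sweeping has started; the
// page is appended to the per-space list directly, so no locking is needed.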
void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
  DCHECK(!sweeping_in_progress_);
  PrepareToBeSweptPage(space, page);
  sweeping_list_[space].push_back(page);
}

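// Registers a page while concurrent sweeping is already running, so the page
// has to go through the mutex-protected path and late_pages_ is flagged.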
void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
                                                Page* page) {
  DCHECK(sweeping_in_progress_);
  PrepareToBeSweptPage(space, page);
  late_pages_ = true;
  AddSweepingPageSafe(space, page);
}

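// Marks the page as pending for sweeping and shrinks the space's accounting
// stats up front by the page's dead bytes (area size minus live bytes).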
void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
                                                         Page* page) {
  page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
  int to_sweep = page->area_size() - page->LiveBytes();
  heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
}

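// Pops the next page to sweep for |space| under mutex_, or returns nullptr if
// the per-space sweeping list is empty.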
Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
    AllocationSpace space) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  Page* page = nullptr;
  if (!sweeping_list_[space].empty()) {
    page = sweeping_list_[space].front();
    sweeping_list_[space].pop_front();
  }
  return page;
}

void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
                                                        Page* page) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  sweeping_list_[space].push_back(page);
}

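// Prepares a paged space for sweeping: evacuation candidates and black pages
// are skipped, NEVER_ALLOCATE_ON_PAGE pages are swept eagerly on the main
// thread to keep them iterable, at most one completely unused page is kept
// (further empty pages are released immediately), and every remaining page is
// queued for the concurrent sweeper.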
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  PageIterator it(space);

  int will_be_swept = 0;
  bool unused_page_present = false;

  while (it.has_next()) {
    Page* p = it.next();
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      DCHECK(evacuation_candidates_.length() > 0);
      continue;
    }

    // We cannot sweep black pages, since all mark bits are set for these
    // pages.
    if (p->IsFlagSet(Page::BLACK_PAGE)) {
      Bitmap::Clear(p);
      p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
      p->ClearFlag(Page::BLACK_PAGE);
      // TODO(hpayer): Free unused memory of last black page.
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory to the free list that is later dropped
      // again. Since the flag is only used for testing, this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
                        Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(
          space, p, nullptr);
      continue;
    }

    // One unused page is kept, all further ones are released before sweeping
    // them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p", p);
        }
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    sweeper().AddPage(space->identity(), p);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
}


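// Kicks off sweeping of the old, code, and map spaces, starts the concurrent
// sweeper tasks, and deallocates unmarked large objects. Sweeping time is
// accumulated in the tracer when FLAG_print_cumulative_gc_stat is set.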
void MarkCompactCollector::SweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = heap_->MonotonicallyIncreasingTimeInMs();
  }

#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    sweeper().StartSweeping();
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                     start_time);
  }
}

Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}

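// Records a code-entry slot in the OLD_TO_OLD remembered set when the target
// code object lives on an evacuation candidate, so the slot can be updated
// after the target has moved. Slots in hosts that skip evacuation slot
// recording are ignored.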
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                               Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(host)) {
    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, CODE_ENTRY_SLOT, slot);
  }
}


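// Called when a code target is patched while compaction is in progress. The
// host code object is looked up from |pc|; if the host is already marked
// black, the new target's relocation slot is recorded here, since marking has
// already visited the host and would otherwise not record it.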
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(host, &rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8