// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/utils-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";


// The following has to hold in order for {Marking::MarkBitFrom} to not produce
// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      evacuation_(false),
      slots_buffer_allocator_(nullptr),
      migration_slots_buffer_(nullptr),
      heap_(heap),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(0),
      code_flusher_(nullptr),
      have_code_to_deoptimize_(false),
      compacting_(false),
      sweeping_in_progress_(false),
      compaction_in_progress_(false),
      pending_sweeper_tasks_semaphore_(0),
      pending_compaction_tasks_semaphore_(0) {
}

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) override {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
      // The next word for sure belongs to the current object, jump over it.
      current += kPointerSize;
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
    return;
  }
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  free_list_old_space_.Reset(new FreeList(heap_->old_space()));
  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
  free_list_map_space_.Reset(new FreeList(heap_->map_space()));
  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
  slots_buffer_allocator_ = new SlotsBufferAllocator();

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
      PrintF("[code-flushing is now on]\n");
    }
  }
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
  delete marking_deque_memory_;
  delete slots_buffer_allocator_;
  delete code_flusher_;
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}

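// Selects evacuation candidates for the old space and (if code-space
// compaction is enabled) the code space, evicts the candidates from the
// linear allocation areas, and returns whether any candidates were found.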
bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
  {
    GCTracer::Scope gc_scope(heap()->tracer(),
                             GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
    RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
  }

  {
    GCTracer::Scope gc_scope(heap()->tracer(),
                             GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
    for (Page* p : evacuation_candidates_) {
      SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
    }
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyValidStoreAndSlotsBufferEntries();
  }
#endif
}


#ifdef VERIFY_HEAP
static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SlotsBuffer::VerifySlots(heap, p->slots_buffer());
  }
}


void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
  RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());

  VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
  VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
  VerifyValidSlotsBufferEntries(heap(), heap()->map_space());

  LargeObjectIterator it(heap()->lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
  }
}
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  EvacuateNewSpaceAndCandidates();

  Finish();
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}

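// Clears the mark bitmaps of the paged and new spaces; large objects are
// marked white individually and their pages' live-byte counts and progress
// bars are reset.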
void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Marking::MarkWhite(Marking::MarkBitFrom(obj));
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}

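// Background task that sweeps the paged spaces one after another, starting
// with |space_to_start_|, and signals |pending_sweeper_tasks_semaphore_| when
// it is done.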
class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, AllocationSpace space_to_start)
      : heap_(heap), space_to_start_(space_to_start) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE);
    DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
    const int offset = space_to_start_ - FIRST_PAGED_SPACE;
    const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
    for (int i = 0; i < num_spaces; i++) {
      const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces);
      DCHECK_GE(space_id, FIRST_PAGED_SPACE);
      DCHECK_LE(space_id, LAST_PAGED_SPACE);
      heap_->mark_compact_collector()->SweepInParallel(
          heap_->paged_space(space_id), 0);
    }
    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
  }

  Heap* heap_;
  AllocationSpace space_to_start_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};

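// Posts one concurrent SweeperTask per paged space (old, code and map space).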
void MarkCompactCollector::StartSweeperThreads() {
  DCHECK(free_list_old_space_.get()->IsEmpty());
  DCHECK(free_list_code_space_.get()->IsEmpty());
  DCHECK(free_list_map_space_.get()->IsEmpty());
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask);
}


void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
  if (!page->SweepingDone()) {
    SweepInParallel(page, owner);
    if (!page->SweepingDone()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}


void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(space->identity()), 0);
    space->RefillFreeList();
  }
}


void MarkCompactCollector::EnsureSweepingCompleted() {
  DCHECK(sweeping_in_progress_ == true);

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
  }

  if (FLAG_concurrent_sweeping) {
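    // Wait once for each of the three sweeper tasks posted by
    // StartSweeperThreads().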
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
  }

  ParallelSweepSpacesComplete();
  sweeping_in_progress_ = false;
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    VerifyEvacuation(heap_);
  }
#endif
}

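// Non-blocking check: try to take one task-completion signal and, if one is
// available, put it back so that EnsureSweepingCompleted() still observes it.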
bool MarkCompactCollector::IsSweepingCompleted() {
  if (!pending_sweeper_tasks_semaphore_.WaitFor(
          base::TimeDelta::FromSeconds(0))) {
    return false;
  }
  pending_sweeper_tasks_semaphore_.Signal();
  return true;
}


void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


void MarkCompactCollector::ComputeEvacuationHeuristics(
    int area_size, int* target_fragmentation_percent,
    int* max_evacuated_bytes) {
  // For memory reducing mode we directly define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const int kTargetMsPerArea = 1;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else {
    const intptr_t estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const intptr_t estimated_ms_per_area =
          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
      *target_fragmentation_percent =
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}

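// Selects evacuation candidate pages for |space| (old or code space), either
// via the testing flags (manual selection, stress compaction) or via the
// fragmentation/quota heuristics from ComputeEvacuationHeuristics().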
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  int area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<int, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    if (p->NeverEvacuate()) continue;
    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
      // This page had slots buffer overflow on previous GC, skip it.
      p->ClearFlag(Page::POPULAR_PAGE);
      continue;
    }
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK(p->slots_buffer() == nullptr);
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
  }

  int candidate_count = 0;
  int total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an evacuation
    // candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., how is the ratio
    //   between live bytes and capacity of this page (= area).
    // * Evacuation quota: A global quota determining how much bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to the
    // set of evacuation candidates as long as both conditions (fragmentation
    // and quota) hold.
    int max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const intptr_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      int live_bytes = pages[i].first;
      int free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%d "
                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
                     "sum_compaction_kb=%d "
                     "compaction_limit_kb=%d\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%d\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    for (Page* p : evacuation_candidates_) {
      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  if (sweeping_in_progress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    EnsureSweepingCompleted();
  }

  // If concurrent unmapping tasks are still running, we should wait for
  // them here.
  heap()->WaitUntilUnmappingOfFreeChunksCompleted();

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
    heap()->incremental_marking()->Stop();
    ClearMarkbits();
    AbortWeakCollections();
    AbortWeakCells();
    AbortTransitionArrays();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  // The hashing of weak_object_to_code_table is no longer valid.
  heap()->weak_object_to_code_table()->Rehash(
      heap()->isolate()->factory()->undefined_value());

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }

  heap_->incremental_marking()->ClearIdleMarkingDelayCounter();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
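//
// (EmptyMarkingDeque() and ProcessMarkingDeque(), used by the visitors below,
// implement the work loop sketched above.)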

void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!shared->OptimizedCodeMapIsCleared()) {
        shared->ClearOptimizedCodeMap();
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      DCHECK(Marking::IsBlack(code_mark));
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
        candidate, slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(
        shared, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}

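// As in ProcessJSFunctionCandidates() above: candidates whose code was not
// marked are reset to the lazy-compile stub (and their optimized code map is
// cleared); the code slot is then recorded for the compactor.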
void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!candidate->OptimizedCodeMapIsCleared()) {
        candidate->ClearOptimizedCodeMap();
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
                                                           *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}

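// Visits the links of the JSFunction candidate list that currently point into
// new space's 'from' half, so the given visitor can update them.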
void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}

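// Static visitor used while tracing the object graph: visited slots are
// recorded for the compactor and the referenced objects are marked and, if
// newly marked, pushed onto the marking deque. It also implements the
// code-flushing support for JSRegExp code.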
class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), object, p);
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, object, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, object, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (Marking::IsWhite(mark_bit)) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         HeapObject* object, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* target_object = HeapObject::cast(*p);
    collector->RecordSlot(object, p, target_object);
    MarkBit mark = Marking::MarkBitFrom(target_object);
    collector->MarkObject(target_object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
                                          Object** start, Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(object, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (Marking::IsBlackOrGrey(mark)) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);

      // Saving a copy might create a pointer into compaction candidate
      // that was not observed by marker. This might happen if JSRegExp data
      // was marked through the compilation cache before marker reached JSRegExp
      // object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot =
          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
      heap->mark_compact_collector()->RecordSlot(data, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_one_byte),
                    Smi::FromInt(heap->ms_count() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
      }
    }
  }


  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both one byte and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExp(map, object);
  }
};


void MarkCompactMarkingVisitor::Initialize() {
  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);

  if (FLAG_track_gc_object_stats) {
    ObjectStatsVisitor::Initialize(&table_);
  }
}


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);
  }

 private:
  MarkCompactCollector* collector_;
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) override {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);
    }
  }

 private:
  MarkCompactCollector* collector_;
};


void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function and StackFrame::LookupCode will return
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      Code* optimized_code = frame->LookupCode();
      MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
      MarkObject(optimized_code, optimized_code_mark);
    }
  }
}

1352void MarkCompactCollector::PrepareForCodeFlushing() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001353 // If code flushing is disabled, there is no need to prepare for it.
1354 if (!is_code_flushing_enabled()) return;
1355
1356 // Ensure that the empty descriptor array is marked. MarkDescriptorArray
1357 // relies on it being marked before any other descriptor array.
1358 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1359 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1360 MarkObject(descriptor_array, descriptor_array_mark);
1361
1362 // Make sure we are not referencing the code from the stack.
1363 DCHECK(this == heap()->mark_compact_collector());
1364 PrepareThreadForCodeFlushing(heap()->isolate(),
1365 heap()->isolate()->thread_local_top());
1366
1367 // Iterate the archived stacks in all threads to check if
1368 // the code is referenced.
1369 CodeMarkingVisitor code_marking_visitor(this);
1370 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1371 &code_marking_visitor);
1372
1373 SharedFunctionInfoMarkingVisitor visitor(this);
1374 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1375 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1376
1377 ProcessMarkingDeque();
1378}
1379
1380
1381// Visitor class for marking heap roots.
1382class RootMarkingVisitor : public ObjectVisitor {
1383 public:
1384 explicit RootMarkingVisitor(Heap* heap)
1385 : collector_(heap->mark_compact_collector()) {}
1386
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001387 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001388
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001389 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001390 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1391 }
1392
1393 // Skip the weak next code link in a code object, which is visited in
1394 // ProcessTopOptimizedFrame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001395 void VisitNextCodeLink(Object** p) override {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001396
1397 private:
1398 void MarkObjectByPointer(Object** p) {
1399 if (!(*p)->IsHeapObject()) return;
1400
1401 // Replace flat cons strings in place.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001402 HeapObject* object = HeapObject::cast(*p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001403 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001404 if (Marking::IsBlackOrGrey(mark_bit)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001405
1406 Map* map = object->map();
1407 // Mark the object.
1408 collector_->SetMark(object, mark_bit);
1409
1410 // Mark the map pointer and body, and push them on the marking stack.
1411 MarkBit map_mark = Marking::MarkBitFrom(map);
1412 collector_->MarkObject(map, map_mark);
1413 MarkCompactMarkingVisitor::IterateBody(map, object);
1414
1415 // Mark all the objects reachable from the map and body. May leave
1416 // overflowed objects in the heap.
1417 collector_->EmptyMarkingDeque();
1418 }
1419
1420 MarkCompactCollector* collector_;
1421};
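// Typical use (see MarkLiveObjects/MarkRoots below): a RootMarkingVisitor is
// handed to the strong-root iterator so every root is marked and processed.
// Minimal sketch:
//
//   RootMarkingVisitor root_visitor(heap());
//   heap()->IterateStrongRoots(&root_visitor, VISIT_ONLY_STRONG);
//
// MarkObjectByPointer drains the marking deque after each root, so the
// transitive closure of a root is processed eagerly; overflowed objects are
// picked up later by RefillMarkingDeque.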
1422
1423
1424// Helper class for pruning the string table.
1425template <bool finalize_external_strings>
1426class StringTableCleaner : public ObjectVisitor {
1427 public:
1428 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1429
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001430 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001431 // Visit all HeapObject pointers in [start, end).
1432 for (Object** p = start; p < end; p++) {
1433 Object* o = *p;
1434 if (o->IsHeapObject() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001435 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001436 if (finalize_external_strings) {
1437 DCHECK(o->IsExternalString());
1438 heap_->FinalizeExternalString(String::cast(*p));
1439 } else {
1440 pointers_removed_++;
1441 }
1442 // Set the entry to the_hole_value (as deleted).
1443 *p = heap_->the_hole_value();
1444 }
1445 }
1446 }
1447
1448 int PointersRemoved() {
1449 DCHECK(!finalize_external_strings);
1450 return pointers_removed_;
1451 }
1452
1453 private:
1454 Heap* heap_;
1455 int pointers_removed_;
1456};
1457
1458
1459typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1460typedef StringTableCleaner<true> ExternalStringTableCleaner;
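// Usage sketch (mirrors ClearNonLiveReferences below): the internalized
// cleaner counts removed entries so the table's element count can be fixed
// up afterwards, while the external cleaner finalizes dead external strings
// instead of counting them.
//
//   InternalizedStringTableCleaner internalized_visitor(heap());
//   string_table->IterateElements(&internalized_visitor);
//   string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
//
//   ExternalStringTableCleaner external_visitor(heap());
//   heap()->external_string_table_.Iterate(&external_visitor);
//   heap()->external_string_table_.CleanUp();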
1461
1462
1463// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1464// are retained.
1465class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1466 public:
1467 virtual Object* RetainAs(Object* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001468 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1469 DCHECK(!Marking::IsGrey(mark_bit));
1470 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001471 return object;
1472 } else if (object->IsAllocationSite() &&
1473 !(AllocationSite::cast(object)->IsZombie())) {
1474 // "dead" AllocationSites need to live long enough for a traversal of new
1475 // space. These sites get a one-time reprieve.
1476 AllocationSite* site = AllocationSite::cast(object);
1477 site->MarkZombie();
1478 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1479 return object;
1480 } else {
1481 return NULL;
1482 }
1483 }
1484};
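// Usage sketch (see ClearNonLiveReferences below): the retainer is applied to
// all weak lists in the heap, dropping unmarked entries while giving "dead"
// AllocationSites their one-time reprieve.
//
//   MarkCompactWeakObjectRetainer mark_compact_object_retainer;
//   heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);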
1485
1486
1487// Fill the marking stack with overflowed objects returned by the given
1488// iterator. Stop when the marking stack is filled or the end of the space
1489// is reached, whichever comes first.
1490template <class T>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001491void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001492 // The caller should ensure that the marking stack is initially not full,
1493 // so that we don't waste effort pointlessly scanning for objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001494 DCHECK(!marking_deque()->IsFull());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001495
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001496 Map* filler_map = heap()->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001497 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1498 MarkBit markbit = Marking::MarkBitFrom(object);
1499 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1500 Marking::GreyToBlack(markbit);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001501 PushBlack(object);
1502 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001503 }
1504 }
1505}
1506
1507
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001508void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1509 DCHECK(!marking_deque()->IsFull());
1510 LiveObjectIterator<kGreyObjects> it(p);
1511 HeapObject* object = NULL;
1512 while ((object = it.Next()) != NULL) {
1513 MarkBit markbit = Marking::MarkBitFrom(object);
1514 DCHECK(Marking::IsGrey(markbit));
1515 Marking::GreyToBlack(markbit);
1516 PushBlack(object);
1517 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001518 }
1519}
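// In the tri-color scheme used here, a grey object is marked but its fields
// have not been scanned yet. When the marking deque overflows, such objects
// remain grey only in the mark bitmap; the Discover* routines above walk the
// bitmap, flip each rediscovered object grey -> black and push it, so its
// body is (re)visited by the main marking loop.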
1520
1521
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001522class MarkCompactCollector::HeapObjectVisitor {
1523 public:
1524 virtual ~HeapObjectVisitor() {}
1525 virtual bool Visit(HeapObject* object) = 0;
1526};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001527
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001528
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001529class MarkCompactCollector::EvacuateVisitorBase
1530 : public MarkCompactCollector::HeapObjectVisitor {
1531 public:
Ben Murdoch097c5b22016-05-18 11:27:45 +01001532 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
1533 SlotsBuffer** evacuation_slots_buffer,
1534 LocalStoreBuffer* local_store_buffer)
1535 : heap_(heap),
1536 evacuation_slots_buffer_(evacuation_slots_buffer),
1537 compaction_spaces_(compaction_spaces),
1538 local_store_buffer_(local_store_buffer) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001539
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001540 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1541 HeapObject** target_object) {
1542 int size = object->Size();
1543 AllocationAlignment alignment = object->RequiredAlignment();
1544 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1545 if (allocation.To(target_object)) {
1546 heap_->mark_compact_collector()->MigrateObject(
1547 *target_object, object, size, target_space->identity(),
Ben Murdoch097c5b22016-05-18 11:27:45 +01001548 evacuation_slots_buffer_, local_store_buffer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001549 return true;
1550 }
1551 return false;
1552 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001553
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001554 protected:
1555 Heap* heap_;
1556 SlotsBuffer** evacuation_slots_buffer_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001557 CompactionSpaceCollection* compaction_spaces_;
1558 LocalStoreBuffer* local_store_buffer_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001559};
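// Subclasses drive evacuation through TryEvacuateObject; a minimal sketch of
// the pattern (compare EvacuateOldSpaceVisitor::Visit below):
//
//   HeapObject* target_object = nullptr;
//   if (TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
//                         &target_object)) {
//     // The object was copied and MigrateObject installed a forwarding
//     // address; recorded slots are rewritten in the pointer-update phase.
//   } else {
//     // Allocation in the target space failed; the caller decides how to
//     // handle this, e.g. by aborting evacuation of the page.
//   }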
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001560
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001561
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001562class MarkCompactCollector::EvacuateNewSpaceVisitor final
1563 : public MarkCompactCollector::EvacuateVisitorBase {
1564 public:
1565 static const intptr_t kLabSize = 4 * KB;
1566 static const intptr_t kMaxLabObjectSize = 256;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001567
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001568 explicit EvacuateNewSpaceVisitor(Heap* heap,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001569 CompactionSpaceCollection* compaction_spaces,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001570 SlotsBuffer** evacuation_slots_buffer,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001571 LocalStoreBuffer* local_store_buffer,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001572 HashMap* local_pretenuring_feedback)
Ben Murdoch097c5b22016-05-18 11:27:45 +01001573 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
1574 local_store_buffer),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001575 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1576 space_to_allocate_(NEW_SPACE),
1577 promoted_size_(0),
1578 semispace_copied_size_(0),
1579 local_pretenuring_feedback_(local_pretenuring_feedback) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001580
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001581 bool Visit(HeapObject* object) override {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001582 heap_->UpdateAllocationSite<Heap::kCached>(object,
1583 local_pretenuring_feedback_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001584 int size = object->Size();
1585 HeapObject* target_object = nullptr;
1586 if (heap_->ShouldBePromoted(object->address(), size) &&
Ben Murdoch097c5b22016-05-18 11:27:45 +01001587 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1588 &target_object)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001589 // If we end up needing more special cases, we should factor this out.
1590 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1591 heap_->array_buffer_tracker()->Promote(
1592 JSArrayBuffer::cast(target_object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001593 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001594 promoted_size_ += size;
1595 return true;
1596 }
1597 HeapObject* target = nullptr;
1598 AllocationSpace space = AllocateTargetObject(object, &target);
1599 heap_->mark_compact_collector()->MigrateObject(
1600 HeapObject::cast(target), object, size, space,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001601 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
1602 (space == NEW_SPACE) ? nullptr : local_store_buffer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001603 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1604 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1605 }
1606 semispace_copied_size_ += size;
1607 return true;
1608 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001609
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001610 intptr_t promoted_size() { return promoted_size_; }
1611 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1612
1613 private:
1614 enum NewSpaceAllocationMode {
1615 kNonstickyBailoutOldSpace,
1616 kStickyBailoutOldSpace,
1617 };
1618
1619 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1620 HeapObject** target_object) {
1621 const int size = old_object->Size();
1622 AllocationAlignment alignment = old_object->RequiredAlignment();
1623 AllocationResult allocation;
1624 if (space_to_allocate_ == NEW_SPACE) {
1625 if (size > kMaxLabObjectSize) {
1626 allocation =
1627 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1628 } else {
1629 allocation = AllocateInLab(size, alignment);
1630 }
1631 }
1632 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1633 allocation = AllocateInOldSpace(size, alignment);
1634 }
1635 bool ok = allocation.To(target_object);
1636 DCHECK(ok);
1637 USE(ok);
1638 return space_to_allocate_;
1639 }
1640
1641 inline bool NewLocalAllocationBuffer() {
1642 AllocationResult result =
1643 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1644 LocalAllocationBuffer saved_old_buffer = buffer_;
1645 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1646 if (buffer_.IsValid()) {
1647 buffer_.TryMerge(&saved_old_buffer);
1648 return true;
1649 }
1650 return false;
1651 }
1652
1653 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1654 AllocationAlignment alignment,
1655 NewSpaceAllocationMode mode) {
1656 AllocationResult allocation =
1657 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1658 if (allocation.IsRetry()) {
1659 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1660 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1661 } else {
1662 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1663 alignment);
1664 if (allocation.IsRetry()) {
1665 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001666 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001667 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001668 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001669 return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001670 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001671
1672 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1673 AllocationAlignment alignment) {
1674 AllocationResult allocation =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001675 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1676 alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001677 if (allocation.IsRetry()) {
1678 FatalProcessOutOfMemory(
1679 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1680 }
1681 return allocation;
1682 }
1683
1684 inline AllocationResult AllocateInLab(int size_in_bytes,
1685 AllocationAlignment alignment) {
1686 AllocationResult allocation;
1687 if (!buffer_.IsValid()) {
1688 if (!NewLocalAllocationBuffer()) {
1689 space_to_allocate_ = OLD_SPACE;
1690 return AllocationResult::Retry(OLD_SPACE);
1691 }
1692 }
1693 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1694 if (allocation.IsRetry()) {
1695 if (!NewLocalAllocationBuffer()) {
1696 space_to_allocate_ = OLD_SPACE;
1697 return AllocationResult::Retry(OLD_SPACE);
1698 } else {
1699 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1700 if (allocation.IsRetry()) {
1701 space_to_allocate_ = OLD_SPACE;
1702 return AllocationResult::Retry(OLD_SPACE);
1703 }
1704 }
1705 }
1706 return allocation;
1707 }
1708
1709 LocalAllocationBuffer buffer_;
1710 AllocationSpace space_to_allocate_;
1711 intptr_t promoted_size_;
1712 intptr_t semispace_copied_size_;
1713 HashMap* local_pretenuring_feedback_;
1714};
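// Allocation strategy, in numbers: objects up to kMaxLabObjectSize (256
// bytes) are bump-allocated from a thread-local LAB of kLabSize (4 KB), so a
// single LAB refill serves at least 4096 / 256 = 16 such copies without
// touching the shared new-space top pointer. Larger objects bypass the LAB
// and allocate in new space directly with the non-sticky bailout, whereas a
// failed LAB refill uses the sticky bailout and flips space_to_allocate_ to
// OLD_SPACE for all remaining objects visited by this visitor.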
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001715
1716
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001717class MarkCompactCollector::EvacuateOldSpaceVisitor final
1718 : public MarkCompactCollector::EvacuateVisitorBase {
1719 public:
1720 EvacuateOldSpaceVisitor(Heap* heap,
1721 CompactionSpaceCollection* compaction_spaces,
Ben Murdoch097c5b22016-05-18 11:27:45 +01001722 SlotsBuffer** evacuation_slots_buffer,
1723 LocalStoreBuffer* local_store_buffer)
1724 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
1725 local_store_buffer) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001726
1727 bool Visit(HeapObject* object) override {
1728 CompactionSpace* target_space = compaction_spaces_->Get(
1729 Page::FromAddress(object->address())->owner()->identity());
1730 HeapObject* target_object = nullptr;
1731 if (TryEvacuateObject(target_space, object, &target_object)) {
1732 DCHECK(object->map_word().IsForwardingAddress());
1733 return true;
1734 }
1735 return false;
1736 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001737};
1738
1739
1740void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001741 PageIterator it(space);
1742 while (it.has_next()) {
1743 Page* p = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001744 DiscoverGreyObjectsOnPage(p);
1745 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001746 }
1747}
1748
1749
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001750void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1751 NewSpace* space = heap()->new_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001752 NewSpacePageIterator it(space->bottom(), space->top());
1753 while (it.has_next()) {
1754 NewSpacePage* page = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001755 DiscoverGreyObjectsOnPage(page);
1756 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001757 }
1758}
1759
1760
1761bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1762 Object* o = *p;
1763 if (!o->IsHeapObject()) return false;
1764 HeapObject* heap_object = HeapObject::cast(o);
1765 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001766 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001767}
1768
1769
1770bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1771 Object** p) {
1772 Object* o = *p;
1773 DCHECK(o->IsHeapObject());
1774 HeapObject* heap_object = HeapObject::cast(o);
1775 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001776 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001777}
1778
1779
1780void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1781 StringTable* string_table = heap()->string_table();
1782 // Mark the string table itself.
1783 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001784 if (Marking::IsWhite(string_table_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001785 // String table could have already been marked by visiting the handles list.
1786 SetMark(string_table, string_table_mark);
1787 }
1788 // Explicitly mark the prefix.
1789 string_table->IteratePrefix(visitor);
1790 ProcessMarkingDeque();
1791}
1792
1793
1794void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1795 MarkBit mark_bit = Marking::MarkBitFrom(site);
1796 SetMark(site, mark_bit);
1797}
1798
1799
1800void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1801 // Mark the heap roots including global variables, stack variables,
1802 // etc., and all objects reachable from them.
1803 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1804
1805 // Handle the string table specially.
1806 MarkStringTable(visitor);
1807
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001808 // There may be overflowed objects in the heap. Visit them now.
1809 while (marking_deque_.overflowed()) {
1810 RefillMarkingDeque();
1811 EmptyMarkingDeque();
1812 }
1813}
1814
1815
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001816void MarkCompactCollector::MarkImplicitRefGroups(
1817 MarkObjectFunction mark_object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001818 List<ImplicitRefGroup*>* ref_groups =
1819 isolate()->global_handles()->implicit_ref_groups();
1820
1821 int last = 0;
1822 for (int i = 0; i < ref_groups->length(); i++) {
1823 ImplicitRefGroup* entry = ref_groups->at(i);
1824 DCHECK(entry != NULL);
1825
1826 if (!IsMarked(*entry->parent)) {
1827 (*ref_groups)[last++] = entry;
1828 continue;
1829 }
1830
1831 Object*** children = entry->children;
1832 // A parent object is marked, so mark all child heap objects.
1833 for (size_t j = 0; j < entry->length; ++j) {
1834 if ((*children[j])->IsHeapObject()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001835 mark_object(heap(), HeapObject::cast(*children[j]));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001836 }
1837 }
1838
1839 // Once the entire group has been marked, dispose it because it's
1840 // not needed anymore.
1841 delete entry;
1842 }
1843 ref_groups->Rewind(last);
1844}
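// The loop above is the usual filter-in-place idiom: groups whose parent is
// not yet marked are compacted towards the front (index `last`) and kept for
// a later pass, groups with a marked parent get their children marked and are
// disposed of immediately, and Rewind(last) finally drops the processed tail.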
1845
1846
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001847// Mark all objects reachable from the objects on the marking stack.
1848// Before: the marking stack contains zero or more heap object pointers.
1849// After: the marking stack is empty, and all objects reachable from the
1850// marking stack have been marked, or are overflowed in the heap.
1851void MarkCompactCollector::EmptyMarkingDeque() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001852 Map* filler_map = heap_->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001853 while (!marking_deque_.IsEmpty()) {
1854 HeapObject* object = marking_deque_.Pop();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001855 // Explicitly skip one-word fillers. Incremental markbit patterns are
1856 // correct only for objects that occupy at least two words.
1857 Map* map = object->map();
1858 if (map == filler_map) continue;
1859
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001860 DCHECK(object->IsHeapObject());
1861 DCHECK(heap()->Contains(object));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001862 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001863
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001864 MarkBit map_mark = Marking::MarkBitFrom(map);
1865 MarkObject(map, map_mark);
1866
1867 MarkCompactMarkingVisitor::IterateBody(map, object);
1868 }
1869}
1870
1871
1872// Sweep the heap for overflowed objects, clear their overflow bits, and
1873// push them on the marking stack. Stop early if the marking stack fills
1874// before sweeping completes. If sweeping completes, there are no remaining
1875// overflowed objects in the heap, so the overflow flag on the marking stack
1876// is cleared.
1877void MarkCompactCollector::RefillMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001878 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001879 DCHECK(marking_deque_.overflowed());
1880
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001881 DiscoverGreyObjectsInNewSpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001882 if (marking_deque_.IsFull()) return;
1883
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001884 DiscoverGreyObjectsInSpace(heap()->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001885 if (marking_deque_.IsFull()) return;
1886
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001887 DiscoverGreyObjectsInSpace(heap()->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001888 if (marking_deque_.IsFull()) return;
1889
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001890 DiscoverGreyObjectsInSpace(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001891 if (marking_deque_.IsFull()) return;
1892
1893 LargeObjectIterator lo_it(heap()->lo_space());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001894 DiscoverGreyObjectsWithIterator(&lo_it);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001895 if (marking_deque_.IsFull()) return;
1896
1897 marking_deque_.ClearOverflowed();
1898}
1899
1900
1901// Mark all objects reachable (transitively) from objects on the marking
1902// stack. Before: the marking stack contains zero or more heap object
1903// pointers. After: the marking stack is empty and there are no overflowed
1904// objects in the heap.
1905void MarkCompactCollector::ProcessMarkingDeque() {
1906 EmptyMarkingDeque();
1907 while (marking_deque_.overflowed()) {
1908 RefillMarkingDeque();
1909 EmptyMarkingDeque();
1910 }
1911}
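// ProcessMarkingDeque is the overflow-tolerant driver for the two helpers
// above: it alternates draining the deque with refilling it from the mark
// bitmaps until no overflow remains. Termination follows from the tri-color
// discipline -- every processed object goes grey -> black, black objects are
// never pushed again, and the live set bounds the total amount of work.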
1912
1913
1914// Mark all objects reachable (transitively) from objects on the marking
1915// stack including references only considered in the atomic marking pause.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001916void MarkCompactCollector::ProcessEphemeralMarking(
1917 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001918 bool work_to_do = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001919 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001920 while (work_to_do) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001921 if (!only_process_harmony_weak_collections) {
1922 isolate()->global_handles()->IterateObjectGroups(
1923 visitor, &IsUnmarkedHeapObjectWithHeap);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001924 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001925 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001926 ProcessWeakCollections();
1927 work_to_do = !marking_deque_.IsEmpty();
1928 ProcessMarkingDeque();
1929 }
1930}
1931
1932
1933void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1934 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1935 !it.done(); it.Advance()) {
1936 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1937 return;
1938 }
1939 if (it.frame()->type() == StackFrame::OPTIMIZED) {
1940 Code* code = it.frame()->LookupCode();
1941 if (!code->CanDeoptAt(it.frame()->pc())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001942 Code::BodyDescriptor::IterateBody(code, visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001943 }
1944 ProcessMarkingDeque();
1945 return;
1946 }
1947 }
1948}
1949
1950
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001951void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
1952 DCHECK(!marking_deque_.in_use());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001953 if (marking_deque_memory_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001954 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
1955 marking_deque_memory_committed_ = 0;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001956 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001957 if (marking_deque_memory_ == NULL) {
1958 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
1959 }
1960}
1961
1962
1963void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
1964 // If the marking deque is too small, we try to allocate a bigger one.
1965 // If that fails, make do with a smaller one.
1966 CHECK(!marking_deque_.in_use());
1967 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
1968 base::VirtualMemory* memory = marking_deque_memory_;
1969 size_t currently_committed = marking_deque_memory_committed_;
1970
1971 if (currently_committed == size) return;
1972
1973 if (currently_committed > size) {
1974 bool success = marking_deque_memory_->Uncommit(
1975 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
1976 currently_committed - size);
1977 if (success) {
1978 marking_deque_memory_committed_ = size;
1979 return;
1980 }
1981 UNREACHABLE();
1982 }
1983
1984 bool success = memory->Commit(
1985 reinterpret_cast<Address>(memory->address()) + currently_committed,
1986 size - currently_committed,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001987 false); // Not executable.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001988 if (success) {
1989 marking_deque_memory_committed_ = size;
1990 return;
1991 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001992 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001993 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001994}
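// EnsureMarkingDequeIsCommitted degrades gracefully under memory pressure:
// starting from max_size it halves the request, retrying each size down to
// kMinMarkingDequeSize. For example, with a (purely illustrative) 4 MB
// request the attempted sizes are 4 MB, 2 MB, 1 MB, ...; only when even the
// minimum cannot be committed do we report out-of-memory.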
1995
1996
1997void MarkCompactCollector::InitializeMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001998 DCHECK(!marking_deque_.in_use());
1999 DCHECK(marking_deque_memory_committed_ > 0);
2000 Address addr = static_cast<Address>(marking_deque_memory_->address());
2001 size_t size = marking_deque_memory_committed_;
2002 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2003 marking_deque_.Initialize(addr, addr + size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002004}
2005
2006
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002007void MarkingDeque::Initialize(Address low, Address high) {
2008 DCHECK(!in_use_);
2009 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2010 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2011 array_ = obj_low;
2012 mask_ = base::bits::RoundDownToPowerOfTwo32(
2013 static_cast<uint32_t>(obj_high - obj_low)) -
2014 1;
2015 top_ = bottom_ = 0;
2016 overflowed_ = false;
2017 in_use_ = true;
2018}
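// Worked example for the mask computation above: if the committed region
// holds 3000 HeapObject* slots, RoundDownToPowerOfTwo32(3000) == 2048, so
// mask_ == 2047 and only the first 2048 slots are used as a ring buffer,
// with deque indices wrapping modulo that power-of-two size. The figure 3000
// is just an illustration of the rounding behaviour.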
2019
2020
2021void MarkingDeque::Uninitialize(bool aborting) {
2022 if (!aborting) {
2023 DCHECK(IsEmpty());
2024 DCHECK(!overflowed_);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002025 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002026 DCHECK(in_use_);
2027 top_ = bottom_ = 0xdecbad;
2028 in_use_ = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002029}
2030
2031
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002032void MarkCompactCollector::MarkLiveObjects() {
2033 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2034 double start_time = 0.0;
2035 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002036 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002037 }
2038 // The recursive GC marker detects when it is nearing stack overflow,
2039 // and switches to a different marking system. JS interrupts interfere
2040 // with the C stack limit check.
2041 PostponeInterruptsScope postpone(isolate());
2042
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002043 {
2044 GCTracer::Scope gc_scope(heap()->tracer(),
2045 GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2046 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2047 if (was_marked_incrementally_) {
2048 incremental_marking->Finalize();
2049 } else {
2050 // Abort any pending incremental activities e.g. incremental sweeping.
2051 incremental_marking->Stop();
2052 if (marking_deque_.in_use()) {
2053 marking_deque_.Uninitialize(true);
2054 }
2055 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002056 }
2057
2058#ifdef DEBUG
2059 DCHECK(state_ == PREPARE_GC);
2060 state_ = MARK_LIVE_OBJECTS;
2061#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002062
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002063 EnsureMarkingDequeIsCommittedAndInitialize(
2064 MarkCompactCollector::kMaxMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002065
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002066 {
2067 GCTracer::Scope gc_scope(heap()->tracer(),
2068 GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2069 PrepareForCodeFlushing();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002070 }
2071
2072 RootMarkingVisitor root_visitor(heap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002073
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002074 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002075 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2076 MarkRoots(&root_visitor);
2077 ProcessTopOptimizedFrame(&root_visitor);
2078 }
2079
2080 {
2081 GCTracer::Scope gc_scope(heap()->tracer(),
2082 GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002083
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002084 // The objects reachable from the roots are marked, yet unreachable
2085 // objects are unmarked. Mark objects reachable due to host
2086 // application specific logic or through Harmony weak maps.
2087 ProcessEphemeralMarking(&root_visitor, false);
2088
2089 // The objects reachable from the roots, weak maps or object groups
2090 // are marked. Objects pointed to only by weak global handles cannot be
2091 // immediately reclaimed. Instead, we have to mark them as pending and mark
2092 // objects reachable from them.
2093 //
2094 // First we identify nonlive weak handles and mark them as pending
2095 // destruction.
2096 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2097 &IsUnmarkedHeapObject);
2098 // Then we mark the objects.
2099 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2100 ProcessMarkingDeque();
2101
2102 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2103 // the weak roots we just marked as pending destruction.
2104 //
2105 // We only process Harmony collections, as all object groups have been fully
2106 // processed and no weakly reachable node can discover new object groups.
2107 ProcessEphemeralMarking(&root_visitor, true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002108 }
2109
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002110 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002111 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2112 start_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002113 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002114 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002115 if (FLAG_trace_gc_object_stats) {
2116 heap()->object_stats_->TraceObjectStats();
2117 }
2118 heap()->object_stats_->CheckpointObjectStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002119 }
2120}
2121
2122
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002123void MarkCompactCollector::ClearNonLiveReferences() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002124 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002125
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002126 {
2127 GCTracer::Scope gc_scope(heap()->tracer(),
2128 GCTracer::Scope::MC_CLEAR_STRING_TABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002129
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002130 // Prune the string table removing all strings only pointed to by the
2131 // string table. Cannot use string_table() here because the string
2132 // table is marked.
2133 StringTable* string_table = heap()->string_table();
2134 InternalizedStringTableCleaner internalized_visitor(heap());
2135 string_table->IterateElements(&internalized_visitor);
2136 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002137
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002138 ExternalStringTableCleaner external_visitor(heap());
2139 heap()->external_string_table_.Iterate(&external_visitor);
2140 heap()->external_string_table_.CleanUp();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002141 }
2142
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002143 {
2144 GCTracer::Scope gc_scope(heap()->tracer(),
2145 GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2146 // Process the weak references.
2147 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2148 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002149 }
2150
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002151 {
2152 GCTracer::Scope gc_scope(heap()->tracer(),
2153 GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2154
2155 // Remove object groups after marking phase.
2156 heap()->isolate()->global_handles()->RemoveObjectGroups();
2157 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002158 }
2159
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002160 // Flush code from collected candidates.
2161 if (is_code_flushing_enabled()) {
2162 GCTracer::Scope gc_scope(heap()->tracer(),
2163 GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2164 code_flusher_->ProcessCandidates();
2165 }
2166
2167
2168 DependentCode* dependent_code_list;
2169 Object* non_live_map_list;
2170 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2171
2172 {
2173 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2174 ClearSimpleMapTransitions(non_live_map_list);
2175 ClearFullMapTransitions();
2176 }
2177
2178 MarkDependentCodeForDeoptimization(dependent_code_list);
2179
2180 ClearWeakCollections();
2181
2182 ClearInvalidStoreAndSlotsBufferEntries();
2183}
2184
2185
2186void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2187 DependentCode* list_head) {
2188 GCTracer::Scope gc_scope(heap()->tracer(),
2189 GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2190 Isolate* isolate = this->isolate();
2191 DependentCode* current = list_head;
2192 while (current->length() > 0) {
2193 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2194 isolate, DependentCode::kWeakCodeGroup);
2195 current = current->next_link();
2196 }
2197
2198 WeakHashTable* table = heap_->weak_object_to_code_table();
2199 uint32_t capacity = table->Capacity();
2200 for (uint32_t i = 0; i < capacity; i++) {
2201 uint32_t key_index = table->EntryToIndex(i);
2202 Object* key = table->get(key_index);
2203 if (!table->IsKey(key)) continue;
2204 uint32_t value_index = table->EntryToValueIndex(i);
2205 Object* value = table->get(value_index);
2206 DCHECK(key->IsWeakCell());
2207 if (WeakCell::cast(key)->cleared()) {
2208 have_code_to_deoptimize_ |=
2209 DependentCode::cast(value)->MarkCodeForDeoptimization(
2210 isolate, DependentCode::kWeakCodeGroup);
2211 table->set(key_index, heap_->the_hole_value());
2212 table->set(value_index, heap_->the_hole_value());
2213 table->ElementRemoved();
2214 }
2215 }
2216}
2217
2218
2219void MarkCompactCollector::ClearSimpleMapTransitions(
2220 Object* non_live_map_list) {
2221 Object* the_hole_value = heap()->the_hole_value();
2222 Object* weak_cell_obj = non_live_map_list;
2223 while (weak_cell_obj != Smi::FromInt(0)) {
2224 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2225 Map* map = Map::cast(weak_cell->value());
2226 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2227 Object* potential_parent = map->constructor_or_backpointer();
2228 if (potential_parent->IsMap()) {
2229 Map* parent = Map::cast(potential_parent);
2230 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2231 parent->raw_transitions() == weak_cell) {
2232 ClearSimpleMapTransition(parent, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002233 }
2234 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002235 weak_cell->clear();
2236 weak_cell_obj = weak_cell->next();
2237 weak_cell->clear_next(the_hole_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 }
2239}
2240
2241
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002242void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2243 Map* dead_transition) {
2244 // A previously existing simple transition (stored in a WeakCell) is going
2245 // to be cleared. Clear the useless cell pointer, and take ownership
2246 // of the descriptor array.
2247 map->set_raw_transitions(Smi::FromInt(0));
2248 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002249 DescriptorArray* descriptors = map->instance_descriptors();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002250 if (descriptors == dead_transition->instance_descriptors() &&
2251 number_of_own_descriptors > 0) {
2252 TrimDescriptorArray(map, descriptors);
2253 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2254 map->set_owns_descriptors(true);
2255 }
2256}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002258
2259void MarkCompactCollector::ClearFullMapTransitions() {
2260 HeapObject* undefined = heap()->undefined_value();
2261 Object* obj = heap()->encountered_transition_arrays();
2262 while (obj != Smi::FromInt(0)) {
2263 TransitionArray* array = TransitionArray::cast(obj);
2264 int num_transitions = array->number_of_entries();
2265 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2266 if (num_transitions > 0) {
2267 Map* map = array->GetTarget(0);
2268 Map* parent = Map::cast(map->constructor_or_backpointer());
2269 bool parent_is_alive =
2270 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2271 DescriptorArray* descriptors =
2272 parent_is_alive ? parent->instance_descriptors() : nullptr;
2273 bool descriptors_owner_died =
2274 CompactTransitionArray(parent, array, descriptors);
2275 if (descriptors_owner_died) {
2276 TrimDescriptorArray(parent, descriptors);
2277 }
2278 }
2279 obj = array->next_link();
2280 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2281 }
2282 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2283}
2284
2285
2286bool MarkCompactCollector::CompactTransitionArray(
2287 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2288 int num_transitions = transitions->number_of_entries();
2289 bool descriptors_owner_died = false;
2290 int transition_index = 0;
2291 // Compact all live transitions to the left.
2292 for (int i = 0; i < num_transitions; ++i) {
2293 Map* target = transitions->GetTarget(i);
2294 DCHECK_EQ(target->constructor_or_backpointer(), map);
2295 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2296 if (descriptors != nullptr &&
2297 target->instance_descriptors() == descriptors) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002298 descriptors_owner_died = true;
2299 }
2300 } else {
2301 if (i != transition_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002302 Name* key = transitions->GetKey(i);
2303 transitions->SetKey(transition_index, key);
2304 Object** key_slot = transitions->GetKeySlot(transition_index);
2305 RecordSlot(transitions, key_slot, key);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002306 // Target slots do not need to be recorded since maps are not compacted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002307 transitions->SetTarget(transition_index, transitions->GetTarget(i));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002308 }
2309 transition_index++;
2310 }
2311 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002312 // If there are no transitions to be cleared, return.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002313 if (transition_index == num_transitions) {
2314 DCHECK(!descriptors_owner_died);
2315 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002316 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002317 // Note that we never eliminate a transition array, though we might right-trim
2318 // such that number_of_transitions() == 0. If this assumption changes,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002319 // TransitionArray::Insert() will need to deal with the case where a transition
2320 // array disappears during GC.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002321 int trim = TransitionArray::Capacity(transitions) - transition_index;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002322 if (trim > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002323 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2324 transitions, trim * TransitionArray::kTransitionSize);
2325 transitions->SetNumberOfTransitions(transition_index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002326 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002327 return descriptors_owner_died;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002328}
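// Worked example: with five transitions whose targets are
// [live, dead, live, dead, dead], the loop above keeps entries 0 and 2 and
// compacts them to indices 0 and 1, so transition_index == 2. With a capacity
// of 5 the array is then right-trimmed by (5 - 2) * kTransitionSize words and
// its transition count set to 2; descriptors_owner_died becomes true only if
// some dead target still used the parent's descriptor array.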
2329
2330
2331void MarkCompactCollector::TrimDescriptorArray(Map* map,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002332 DescriptorArray* descriptors) {
2333 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2334 if (number_of_own_descriptors == 0) {
2335 DCHECK(descriptors == heap_->empty_descriptor_array());
2336 return;
2337 }
2338
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002339 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2340 int to_trim = number_of_descriptors - number_of_own_descriptors;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002341 if (to_trim > 0) {
2342 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2343 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2344 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002345
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002346 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2347 descriptors->Sort();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002348
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002349 if (FLAG_unbox_double_fields) {
2350 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2351 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2352 number_of_own_descriptors);
2353 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2354 }
2355 }
2356 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2357 map->set_owns_descriptors(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002358}
2359
2360
2361void MarkCompactCollector::TrimEnumCache(Map* map,
2362 DescriptorArray* descriptors) {
2363 int live_enum = map->EnumLength();
2364 if (live_enum == kInvalidEnumCacheSentinel) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002365 live_enum =
2366 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002367 }
2368 if (live_enum == 0) return descriptors->ClearEnumCache();
2369
2370 FixedArray* enum_cache = descriptors->GetEnumCache();
2371
2372 int to_trim = enum_cache->length() - live_enum;
2373 if (to_trim <= 0) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002374 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2375 descriptors->GetEnumCache(), to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002376
2377 if (!descriptors->HasEnumIndicesCache()) return;
2378 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002379 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2380 to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002381}
2382
2383
2384void MarkCompactCollector::ProcessWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002385 Object* weak_collection_obj = heap()->encountered_weak_collections();
2386 while (weak_collection_obj != Smi::FromInt(0)) {
2387 JSWeakCollection* weak_collection =
2388 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2389 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2390 if (weak_collection->table()->IsHashTable()) {
2391 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002392 for (int i = 0; i < table->Capacity(); i++) {
2393 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2394 Object** key_slot =
2395 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002396 RecordSlot(table, key_slot, *key_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002397 Object** value_slot =
2398 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002399 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002400 value_slot);
2401 }
2402 }
2403 }
2404 weak_collection_obj = weak_collection->next();
2405 }
2406}
2407
2408
2409void MarkCompactCollector::ClearWeakCollections() {
2410 GCTracer::Scope gc_scope(heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002411 GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002412 Object* weak_collection_obj = heap()->encountered_weak_collections();
2413 while (weak_collection_obj != Smi::FromInt(0)) {
2414 JSWeakCollection* weak_collection =
2415 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2416 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2417 if (weak_collection->table()->IsHashTable()) {
2418 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2419 for (int i = 0; i < table->Capacity(); i++) {
2420 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2421 if (!MarkCompactCollector::IsMarked(key)) {
2422 table->RemoveEntry(i);
2423 }
2424 }
2425 }
2426 weak_collection_obj = weak_collection->next();
2427 weak_collection->set_next(heap()->undefined_value());
2428 }
2429 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2430}
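// Ephemeron semantics of the two passes above: ProcessWeakCollections marks a
// value only once its key is marked (and records the key slot for later
// pointer updates), which is why ProcessEphemeralMarking re-runs it until a
// fixpoint is reached. ClearWeakCollections then removes every entry whose
// key is still unmarked, so an entry survives iff its key is reachable via
// strong references or via the values of other live ephemeron entries.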
2431
2432
2433void MarkCompactCollector::AbortWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002434 Object* weak_collection_obj = heap()->encountered_weak_collections();
2435 while (weak_collection_obj != Smi::FromInt(0)) {
2436 JSWeakCollection* weak_collection =
2437 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2438 weak_collection_obj = weak_collection->next();
2439 weak_collection->set_next(heap()->undefined_value());
2440 }
2441 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2442}
2443
2444
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002445void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2446 DependentCode** dependent_code_list) {
2447 Heap* heap = this->heap();
2448 GCTracer::Scope gc_scope(heap->tracer(),
2449 GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2450 Object* weak_cell_obj = heap->encountered_weak_cells();
2451 Object* the_hole_value = heap->the_hole_value();
2452 DependentCode* dependent_code_head =
2453 DependentCode::cast(heap->empty_fixed_array());
2454 Object* non_live_map_head = Smi::FromInt(0);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002455 while (weak_cell_obj != Smi::FromInt(0)) {
2456 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002457 Object* next_weak_cell = weak_cell->next();
2458 bool clear_value = true;
2459 bool clear_next = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002460 // We do not insert cleared weak cells into the list, so the value
2461 // cannot be a Smi here.
2462 HeapObject* value = HeapObject::cast(weak_cell->value());
2463 if (!MarkCompactCollector::IsMarked(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002464 // Cells for new-space objects embedded in optimized code are wrapped in
2465 // WeakCell and put into Heap::weak_object_to_code_table.
2466 // Such cells do not have any strong references but we want to keep them
2467 // alive as long as the cell value is alive.
2468 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2469 if (value->IsCell()) {
2470 Object* cell_value = Cell::cast(value)->value();
2471 if (cell_value->IsHeapObject() &&
2472 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2473 // Resurrect the cell.
2474 MarkBit mark = Marking::MarkBitFrom(value);
2475 SetMark(value, mark);
2476 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2477 RecordSlot(value, slot, *slot);
2478 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2479 RecordSlot(weak_cell, slot, *slot);
2480 clear_value = false;
2481 }
2482 }
2483 if (value->IsMap()) {
2484 // The map is non-live.
2485 Map* map = Map::cast(value);
2486 // Add dependent code to the dependent_code_list.
2487 DependentCode* candidate = map->dependent_code();
2488 // We rely on the fact that the weak code group comes first.
2489 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2490 if (candidate->length() > 0 &&
2491 candidate->group() == DependentCode::kWeakCodeGroup) {
2492 candidate->set_next_link(dependent_code_head);
2493 dependent_code_head = candidate;
2494 }
2495 // Add the weak cell to the non_live_map list.
2496 weak_cell->set_next(non_live_map_head);
2497 non_live_map_head = weak_cell;
2498 clear_value = false;
2499 clear_next = false;
2500 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002501 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002502 // The value of the weak cell is alive.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002503 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002504 RecordSlot(weak_cell, slot, *slot);
2505 clear_value = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002506 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002507 if (clear_value) {
2508 weak_cell->clear();
2509 }
2510 if (clear_next) {
2511 weak_cell->clear_next(the_hole_value);
2512 }
2513 weak_cell_obj = next_weak_cell;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002514 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002515 heap->set_encountered_weak_cells(Smi::FromInt(0));
2516 *non_live_map_list = non_live_map_head;
2517 *dependent_code_list = dependent_code_head;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002518}
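// Summary of the cases handled above for each encountered WeakCell:
//  - value alive: keep the cell and record its value slot for updating;
//  - dead Cell whose own value is alive: resurrect the Cell and keep the
//    WeakCell (support for Heap::weak_object_to_code_table);
//  - dead Map: thread the WeakCell onto the non_live_map list (reusing its
//    next field) and queue the map's weak dependent code for deoptimization;
//  - anything else dead: clear the cell.
// Next pointers of fully processed cells are reset to the hole value.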
2519
2520
2521void MarkCompactCollector::AbortWeakCells() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002522 Object* the_hole_value = heap()->the_hole_value();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002523 Object* weak_cell_obj = heap()->encountered_weak_cells();
2524 while (weak_cell_obj != Smi::FromInt(0)) {
2525 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2526 weak_cell_obj = weak_cell->next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002527 weak_cell->clear_next(the_hole_value);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002528 }
2529 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2530}
2531
2532
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002533void MarkCompactCollector::AbortTransitionArrays() {
2534 HeapObject* undefined = heap()->undefined_value();
2535 Object* obj = heap()->encountered_transition_arrays();
2536 while (obj != Smi::FromInt(0)) {
2537 TransitionArray* array = TransitionArray::cast(obj);
2538 obj = array->next_link();
2539 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2540 }
2541 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2542}
2543
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002544void MarkCompactCollector::RecordMigratedSlot(
Ben Murdoch097c5b22016-05-18 11:27:45 +01002545 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
2546 LocalStoreBuffer* local_store_buffer) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002547 // When parallel compaction is in progress, store and slots buffer entries
2548 // require synchronization.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002549 if (heap_->InNewSpace(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002550 if (compaction_in_progress_) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002551 local_store_buffer->Record(slot);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002552 } else {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002553 Page* page = Page::FromAddress(slot);
2554 RememberedSet<OLD_TO_NEW>::Insert(page, slot);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002555 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002556 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002557 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002558 reinterpret_cast<Object**>(slot),
2559 SlotsBuffer::IGNORE_OVERFLOW);
2560 }
2561}
2562
2563
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002564void MarkCompactCollector::RecordMigratedCodeEntrySlot(
2565 Address code_entry, Address code_entry_slot,
2566 SlotsBuffer** evacuation_slots_buffer) {
2567 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2568 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2569 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2570 SlotsBuffer::IGNORE_OVERFLOW);
2571 }
2572}
2573
2574
2575void MarkCompactCollector::RecordMigratedCodeObjectSlot(
2576 Address code_object, SlotsBuffer** evacuation_slots_buffer) {
2577 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2578 SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
2579 SlotsBuffer::IGNORE_OVERFLOW);
2580}
2581
2582
2583static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
2584 if (RelocInfo::IsCodeTarget(rmode)) {
2585 return SlotsBuffer::CODE_TARGET_SLOT;
2586 } else if (RelocInfo::IsCell(rmode)) {
2587 return SlotsBuffer::CELL_TARGET_SLOT;
2588 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
2589 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
2590 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
2591 return SlotsBuffer::DEBUG_TARGET_SLOT;
2592 }
2593 UNREACHABLE();
2594 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
2595}
2596
2597
2598static inline SlotsBuffer::SlotType DecodeSlotType(
2599 SlotsBuffer::ObjectSlot slot) {
2600 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
2601}
2602
2603
2604void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
2605 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2606 RelocInfo::Mode rmode = rinfo->rmode();
2607 if (target_page->IsEvacuationCandidate() &&
2608 (rinfo->host() == NULL ||
2609 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2610 Address addr = rinfo->pc();
2611 SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
2612 if (rinfo->IsInConstantPool()) {
2613 addr = rinfo->constant_pool_entry_address();
2614 if (RelocInfo::IsCodeTarget(rmode)) {
2615 slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
2616 } else {
2617 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2618 slot_type = SlotsBuffer::OBJECT_SLOT;
2619 }
2620 }
2621 bool success = SlotsBuffer::AddTo(
2622 slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
2623 addr, SlotsBuffer::FAIL_ON_OVERFLOW);
2624 if (!success) {
2625 EvictPopularEvacuationCandidate(target_page);
2626 }
2627 }
2628}
2629
2630
2631class RecordMigratedSlotVisitor final : public ObjectVisitor {
2632 public:
2633 RecordMigratedSlotVisitor(MarkCompactCollector* collector,
Ben Murdoch097c5b22016-05-18 11:27:45 +01002634 SlotsBuffer** evacuation_slots_buffer,
2635 LocalStoreBuffer* local_store_buffer)
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002636 : collector_(collector),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002637 evacuation_slots_buffer_(evacuation_slots_buffer),
2638 local_store_buffer_(local_store_buffer) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002639
2640 V8_INLINE void VisitPointer(Object** p) override {
2641 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002642 evacuation_slots_buffer_,
2643 local_store_buffer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002644 }
2645
2646 V8_INLINE void VisitPointers(Object** start, Object** end) override {
2647 while (start < end) {
2648 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
Ben Murdoch097c5b22016-05-18 11:27:45 +01002649 evacuation_slots_buffer_,
2650 local_store_buffer_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002651 ++start;
2652 }
2653 }
2654
2655 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2656 if (collector_->compacting_) {
2657 Address code_entry = Memory::Address_at(code_entry_slot);
2658 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2659 evacuation_slots_buffer_);
2660 }
2661 }
2662
2663 private:
2664 MarkCompactCollector* collector_;
2665 SlotsBuffer** evacuation_slots_buffer_;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002666 LocalStoreBuffer* local_store_buffer_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002667};
2668
2669
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002670// We scavenge new space simultaneously with sweeping. This is done in two
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002671// passes.
2672//
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If the object is dead,
// we write NULL as a forwarding address.
2677//
2678// The second pass updates pointers to new space in all spaces. It is possible
2679// to encounter pointers to dead new space objects during traversal of pointers
// to new space. We should clear them to avoid encountering them during the
// next pointer iteration. This is an issue if the store buffer overflows and we
2682// have to scan the entire old space, including dead objects, looking for
2683// pointers to new space.
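//
// A minimal sketch of the forwarding protocol, under the simplified view
// above (the real code goes through MapWord; see MigrateObject() and
// PointersUpdatingVisitor::UpdateSlot()):
//
//   Memory::Address_at(src->address()) = dst->address();   // first pass
//   ...
//   if (obj->map_word().IsForwardingAddress()) {            // second pass
//     *slot = obj->map_word().ToForwardingAddress();
//   }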
Ben Murdoch097c5b22016-05-18 11:27:45 +01002684void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2685 int size, AllocationSpace dest,
2686 SlotsBuffer** evacuation_slots_buffer,
2687 LocalStoreBuffer* local_store_buffer) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002688 Address dst_addr = dst->address();
2689 Address src_addr = src->address();
2690 DCHECK(heap()->AllowedToBeMigrated(src, dest));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002691 DCHECK(dest != LO_SPACE);
2692 if (dest == OLD_SPACE) {
2693 DCHECK_OBJECT_SIZE(size);
2694 DCHECK(evacuation_slots_buffer != nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002695 DCHECK(IsAligned(size, kPointerSize));
2696
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002697 heap()->MoveBlock(dst->address(), src->address(), size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002698 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
2699 local_store_buffer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002700 dst->IterateBody(&visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002701 } else if (dest == CODE_SPACE) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002702 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2703 DCHECK(evacuation_slots_buffer != nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002704 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2705 heap()->MoveBlock(dst_addr, src_addr, size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002706 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002707 Code::cast(dst)->Relocate(dst_addr - src_addr);
2708 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002709 DCHECK_OBJECT_SIZE(size);
2710 DCHECK(evacuation_slots_buffer == nullptr);
2711 DCHECK(dest == NEW_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002712 heap()->MoveBlock(dst_addr, src_addr, size);
2713 }
2714 heap()->OnMoveEvent(dst, src, size);
2715 Memory::Address_at(src_addr) = dst_addr;
2716}
2717
2718
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002719static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
2720 SlotsBuffer::SlotType slot_type, Address addr) {
2721 switch (slot_type) {
2722 case SlotsBuffer::CODE_TARGET_SLOT: {
2723 RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
2724 rinfo.Visit(isolate, v);
2725 break;
2726 }
2727 case SlotsBuffer::CELL_TARGET_SLOT: {
2728 RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
2729 rinfo.Visit(isolate, v);
2730 break;
2731 }
2732 case SlotsBuffer::CODE_ENTRY_SLOT: {
2733 v->VisitCodeEntry(addr);
2734 break;
2735 }
2736 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2737 HeapObject* obj = HeapObject::FromAddress(addr);
2738 Code::BodyDescriptor::IterateBody(obj, v);
2739 break;
2740 }
2741 case SlotsBuffer::DEBUG_TARGET_SLOT: {
2742 RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
2743 NULL);
2744 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
2745 break;
2746 }
2747 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
2748 RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
2749 rinfo.Visit(isolate, v);
2750 break;
2751 }
2752 case SlotsBuffer::OBJECT_SLOT: {
2753 v->VisitPointer(reinterpret_cast<Object**>(addr));
2754 break;
2755 }
2756 default:
2757 UNREACHABLE();
2758 break;
2759 }
2760}
2761
2762
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002763// Visitor for updating pointers from live objects in old spaces to new space.
2764// It does not expect to encounter pointers to dead objects.
2765class PointersUpdatingVisitor : public ObjectVisitor {
2766 public:
2767 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2768
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002769 void VisitPointer(Object** p) override { UpdatePointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002770
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002771 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002772 for (Object** p = start; p < end; p++) UpdatePointer(p);
2773 }
2774
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002775 void VisitCell(RelocInfo* rinfo) override {
2776 DCHECK(rinfo->rmode() == RelocInfo::CELL);
2777 Object* cell = rinfo->target_cell();
2778 Object* old_cell = cell;
2779 VisitPointer(&cell);
2780 if (cell != old_cell) {
2781 rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
2782 }
2783 }
2784
2785 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002786 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2787 Object* target = rinfo->target_object();
2788 Object* old_target = target;
2789 VisitPointer(&target);
    // Avoid unnecessary changes that might unnecessarily flush the instruction
    // cache.
2792 if (target != old_target) {
2793 rinfo->set_target_object(target);
2794 }
2795 }
2796
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002797 void VisitCodeTarget(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002798 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2799 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2800 Object* old_target = target;
2801 VisitPointer(&target);
2802 if (target != old_target) {
2803 rinfo->set_target_address(Code::cast(target)->instruction_start());
2804 }
2805 }
2806
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002807 void VisitCodeAgeSequence(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002808 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2809 Object* stub = rinfo->code_age_stub();
2810 DCHECK(stub != NULL);
2811 VisitPointer(&stub);
2812 if (stub != rinfo->code_age_stub()) {
2813 rinfo->set_code_age_stub(Code::cast(stub));
2814 }
2815 }
2816
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002817 void VisitDebugTarget(RelocInfo* rinfo) override {
2818 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2819 rinfo->IsPatchedDebugBreakSlotSequence());
2820 Object* target =
2821 Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002822 VisitPointer(&target);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002823 rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002824 }
2825
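  // Updates a single slot: smis and objects that have not moved are left
  // untouched; only slots whose target carries a forwarding map word are
  // rewritten. The compare-and-swap presumably keeps concurrent updaters that
  // race on the same slot from clobbering each other, since every updater
  // installs the same forwarding address.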
2826 static inline void UpdateSlot(Heap* heap, Object** slot) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002827 Object* obj = reinterpret_cast<Object*>(
2828 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002829
2830 if (!obj->IsHeapObject()) return;
2831
2832 HeapObject* heap_obj = HeapObject::cast(obj);
2833
2834 MapWord map_word = heap_obj->map_word();
2835 if (map_word.IsForwardingAddress()) {
2836 DCHECK(heap->InFromSpace(heap_obj) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002837 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2838 Page::FromAddress(heap_obj->address())
2839 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002840 HeapObject* target = map_word.ToForwardingAddress();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002841 base::NoBarrier_CompareAndSwap(
2842 reinterpret_cast<base::AtomicWord*>(slot),
2843 reinterpret_cast<base::AtomicWord>(obj),
2844 reinterpret_cast<base::AtomicWord>(target));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002845 DCHECK(!heap->InFromSpace(target) &&
2846 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2847 }
2848 }
2849
2850 private:
2851 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2852
2853 Heap* heap_;
2854};
2855
2856
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002857void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
2858 PointersUpdatingVisitor v(heap_);
2859 size_t buffer_size = buffer->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002860
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002861 for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
2862 SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
2863 if (!SlotsBuffer::IsTypedSlot(slot)) {
2864 PointersUpdatingVisitor::UpdateSlot(heap_, slot);
2865 } else {
2866 ++slot_idx;
2867 DCHECK(slot_idx < buffer_size);
2868 UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
2869 reinterpret_cast<Address>(buffer->Get(slot_idx)));
2870 }
2871 }
2872}
2873
2874
2875void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
2876 while (buffer != NULL) {
2877 UpdateSlots(buffer);
2878 buffer = buffer->next();
2879 }
2880}
2881
2882
2883static void UpdatePointer(HeapObject** address, HeapObject* object) {
2884 MapWord map_word = object->map_word();
  // Since we only filter invalid slots in old space, the store buffer can
  // still contain stale pointers in large object space and in map space.
  // Ignore these pointers here.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002888 DCHECK(map_word.IsForwardingAddress() ||
Ben Murdoch097c5b22016-05-18 11:27:45 +01002889 !object->GetHeap()->old_space()->Contains(
2890 reinterpret_cast<Address>(address)));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002891 if (map_word.IsForwardingAddress()) {
2892 // Update the corresponding slot.
2893 *address = map_word.ToForwardingAddress();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002894 }
2895}
2896
2897
2898static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2899 Object** p) {
2900 MapWord map_word = HeapObject::cast(*p)->map_word();
2901
2902 if (map_word.IsForwardingAddress()) {
2903 return String::cast(map_word.ToForwardingAddress());
2904 }
2905
2906 return String::cast(*p);
2907}
2908
2909
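// In short (a simplified reading of the function below): starting from the
// slot's mark bit, scan backwards to the nearest set bit; the word covered by
// that bit is taken as the start of the last black object preceding the slot,
// and the slot is reported as live only if it falls strictly inside that
// object. Slots pointing at an object's first word are treated as invalid
// left-trimming leftovers.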
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002910bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
2911 HeapObject** out_object) {
2912 Space* owner = p->owner();
2913 if (owner == heap_->lo_space() || owner == NULL) {
2914 Object* large_object = heap_->lo_space()->FindObject(slot);
2915 // This object has to exist, otherwise we would not have recorded a slot
2916 // for it.
2917 CHECK(large_object->IsHeapObject());
2918 HeapObject* large_heap_object = HeapObject::cast(large_object);
2919 if (IsMarked(large_heap_object)) {
2920 *out_object = large_heap_object;
2921 return true;
2922 }
2923 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002924 }
2925
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002926 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2927 unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2928 MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2929 MarkBit::CellType* cells = p->markbits()->cells();
2930 Address base_address = p->area_start();
2931 unsigned int base_address_cell_index = Bitmap::IndexToCell(
2932 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
2933
2934 // Check if the slot points to the start of an object. This can happen e.g.
2935 // when we left trim a fixed array. Such slots are invalid and we can remove
2936 // them.
2937 if (index_mask > 1) {
2938 if ((cells[cell_index] & index_mask) != 0 &&
2939 (cells[cell_index] & (index_mask >> 1)) == 0) {
2940 return false;
2941 }
2942 } else {
2943 // Left trimming moves the mark bits so we cannot be in the very first cell.
2944 DCHECK(cell_index != base_address_cell_index);
2945 if ((cells[cell_index] & index_mask) != 0 &&
2946 (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2947 return false;
2948 }
2949 }
2950
2951 // Check if the object is in the current cell.
2952 MarkBit::CellType slot_mask;
2953 if ((cells[cell_index] == 0) ||
2954 (base::bits::CountTrailingZeros32(cells[cell_index]) >
2955 base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2956 // If we are already in the first cell, there is no live object.
2957 if (cell_index == base_address_cell_index) return false;
2958
    // If not, find a preceding cell that has a mark bit set.
2960 do {
2961 cell_index--;
2962 } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2963
2964 // The slot must be in a dead object if there are no preceding cells that
2965 // have mark bits set.
2966 if (cells[cell_index] == 0) {
2967 return false;
2968 }
2969
2970 // The object is in a preceding cell. Set the mask to find any object.
2971 slot_mask = ~0u;
2972 } else {
2973 // We are interested in object mark bits right before the slot.
2974 slot_mask = index_mask + (index_mask - 1);
2975 }
2976
2977 MarkBit::CellType current_cell = cells[cell_index];
2978 CHECK(current_cell != 0);
2979
2980 // Find the last live object in the cell.
2981 unsigned int leading_zeros =
2982 base::bits::CountLeadingZeros32(current_cell & slot_mask);
2983 CHECK(leading_zeros != Bitmap::kBitsPerCell);
2984 int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2985
2986 base_address += (cell_index - base_address_cell_index) *
2987 Bitmap::kBitsPerCell * kPointerSize;
2988 Address address = base_address + offset * kPointerSize;
2989 HeapObject* object = HeapObject::FromAddress(address);
2990 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2991 CHECK(object->address() < reinterpret_cast<Address>(slot));
2992 if ((object->address() + kPointerSize) <= slot &&
2993 (object->address() + object->Size()) > slot) {
2994 // If the slot is within the last found object in the cell, the slot is
2995 // in a live object.
2996 // Slots pointing to the first word of an object are invalid and removed.
2997 // This can happen when we move the object header while left trimming.
2998 *out_object = object;
2999 return true;
3000 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003001 return false;
3002}
3003
3004
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003005bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
3006 // This function does not support large objects right now.
3007 Space* owner = p->owner();
3008 if (owner == heap_->lo_space() || owner == NULL) {
3009 Object* large_object = heap_->lo_space()->FindObject(slot);
3010 // This object has to exist, otherwise we would not have recorded a slot
3011 // for it.
3012 CHECK(large_object->IsHeapObject());
3013 HeapObject* large_heap_object = HeapObject::cast(large_object);
3014 if (IsMarked(large_heap_object)) {
3015 return true;
3016 }
3017 return false;
3018 }
3019
3020 LiveObjectIterator<kBlackObjects> it(p);
3021 HeapObject* object = NULL;
3022 while ((object = it.Next()) != NULL) {
3023 int size = object->Size();
3024
3025 if (object->address() > slot) return false;
3026 if (object->address() <= slot && slot < (object->address() + size)) {
3027 return true;
3028 }
3029 }
3030 return false;
3031}
3032
3033
3034bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
3035 HeapObject* object = NULL;
3036 // The target object is black but we don't know if the source slot is black.
3037 // The source object could have died and the slot could be part of a free
3038 // space. Find out based on mark bits if the slot is part of a live object.
3039 if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
3040 return false;
3041 }
3042
3043 DCHECK(object != NULL);
3044 int offset = static_cast<int>(slot - object->address());
3045 return object->IsValidSlot(offset);
3046}
3047
3048
3049void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
3050 HeapObject* object) {
3051 // The target object has to be black.
3052 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3053
3054 // The target object is black but we don't know if the source slot is black.
3055 // The source object could have died and the slot could be part of a free
3056 // space. Use the mark bit iterator to find out about liveness of the slot.
3057 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3058}
3059
3060
3061void MarkCompactCollector::EvacuateNewSpacePrologue() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003062 NewSpace* new_space = heap()->new_space();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003063 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3064 // Append the list of new space pages to be processed.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003065 while (it.has_next()) {
3066 newspace_evacuation_candidates_.Add(it.next());
3067 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003068 new_space->Flip();
3069 new_space->ResetAllocationInfo();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003070}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003071
Ben Murdoch097c5b22016-05-18 11:27:45 +01003072void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3073 newspace_evacuation_candidates_.Rewind(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003074}
3075
3076
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003077void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3078 SlotsBuffer* evacuation_slots_buffer) {
3079 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3080 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3081}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003082
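// Evacuates new space pages and old space evacuation candidates using
// thread-local buffers. A rough usage sketch (identifiers are placeholders),
// mirroring what EvacuatePagesInParallel() does further below:
//
//   Evacuator* evacuator =
//       new Evacuator(collector, old_candidates, new_candidates);
//   evacuator->EvacuatePages();  // possibly on a background CompactionTask
//   evacuator->Finalize();       // must run on the main thread
//   delete evacuator;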
Ben Murdoch097c5b22016-05-18 11:27:45 +01003083class MarkCompactCollector::Evacuator : public Malloced {
3084 public:
3085 Evacuator(MarkCompactCollector* collector,
3086 const List<Page*>& evacuation_candidates,
3087 const List<NewSpacePage*>& newspace_evacuation_candidates)
3088 : collector_(collector),
3089 evacuation_candidates_(evacuation_candidates),
3090 newspace_evacuation_candidates_(newspace_evacuation_candidates),
3091 compaction_spaces_(collector->heap()),
3092 local_slots_buffer_(nullptr),
3093 local_store_buffer_(collector->heap()),
3094 local_pretenuring_feedback_(HashMap::PointersMatch,
3095 kInitialLocalPretenuringFeedbackCapacity),
3096 new_space_visitor_(collector->heap(), &compaction_spaces_,
3097 &local_slots_buffer_, &local_store_buffer_,
3098 &local_pretenuring_feedback_),
3099 old_space_visitor_(collector->heap(), &compaction_spaces_,
3100 &local_slots_buffer_, &local_store_buffer_),
3101 duration_(0.0),
3102 bytes_compacted_(0),
3103 task_id_(0) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003104
Ben Murdoch097c5b22016-05-18 11:27:45 +01003105 // Evacuate the configured set of pages in parallel.
3106 inline void EvacuatePages();
3107
3108 // Merge back locally cached info sequentially. Note that this method needs
3109 // to be called from the main thread.
3110 inline void Finalize();
3111
3112 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3113
3114 uint32_t task_id() { return task_id_; }
3115 void set_task_id(uint32_t id) { task_id_ = id; }
3116
3117 private:
3118 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3119
3120 Heap* heap() { return collector_->heap(); }
3121
3122 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3123 duration_ += duration;
3124 bytes_compacted_ += bytes_compacted;
3125 }
3126
3127 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
3128
3129 MarkCompactCollector* collector_;
3130
3131 // Pages to process.
3132 const List<Page*>& evacuation_candidates_;
3133 const List<NewSpacePage*>& newspace_evacuation_candidates_;
3134
3135 // Locally cached collector data.
3136 CompactionSpaceCollection compaction_spaces_;
3137 SlotsBuffer* local_slots_buffer_;
3138 LocalStoreBuffer local_store_buffer_;
3139 HashMap local_pretenuring_feedback_;
3140
  // Visitors for the corresponding spaces.
3142 EvacuateNewSpaceVisitor new_space_visitor_;
3143 EvacuateOldSpaceVisitor old_space_visitor_;
3144
  // Bookkeeping info.
3146 double duration_;
3147 intptr_t bytes_compacted_;
3148
  // Task id, if this evacuator is executed on a background task instead of
  // the main thread. Can be used to try to abort the task currently scheduled
  // to evacuate pages.
3152 uint32_t task_id_;
3153};
3154
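// Per-page state machine, as used below and in EvacuatePagesInParallel():
// kCompactingDone -> kCompactingInProgress -> kCompactingFinalize on success
// or kCompactingAborted on failure (popular pages that are skipped go straight
// back to kCompactingDone); the main thread later resets every page back to
// kCompactingDone.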
3155bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
3156 MemoryChunk* p, HeapObjectVisitor* visitor) {
3157 bool success = true;
3158 if (p->parallel_compaction_state().TrySetValue(
3159 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3160 if (p->IsEvacuationCandidate() || p->InNewSpace()) {
3161 DCHECK_EQ(p->parallel_compaction_state().Value(),
3162 MemoryChunk::kCompactingInProgress);
3163 int saved_live_bytes = p->LiveBytes();
3164 double evacuation_time;
3165 {
3166 AlwaysAllocateScope always_allocate(heap()->isolate());
3167 TimedScope timed_scope(&evacuation_time);
3168 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
3169 }
3170 if (success) {
3171 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3172 p->parallel_compaction_state().SetValue(
3173 MemoryChunk::kCompactingFinalize);
3174 } else {
3175 p->parallel_compaction_state().SetValue(
3176 MemoryChunk::kCompactingAborted);
3177 }
3178 } else {
3179 // There could be popular pages in the list of evacuation candidates
3180 // which we do not compact.
3181 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3182 }
3183 }
3184 return success;
3185}
3186
3187void MarkCompactCollector::Evacuator::EvacuatePages() {
3188 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3189 DCHECK(p->InNewSpace());
3190 DCHECK_EQ(p->concurrent_sweeping_state().Value(),
3191 NewSpacePage::kSweepingDone);
3192 bool success = EvacuateSinglePage(p, &new_space_visitor_);
3193 DCHECK(success);
3194 USE(success);
3195 }
3196 for (Page* p : evacuation_candidates_) {
3197 DCHECK(p->IsEvacuationCandidate() ||
3198 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
3199 DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
3200 EvacuateSinglePage(p, &old_space_visitor_);
3201 }
3202}
3203
3204void MarkCompactCollector::Evacuator::Finalize() {
3205 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3206 heap()->code_space()->MergeCompactionSpace(
3207 compaction_spaces_.Get(CODE_SPACE));
3208 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3209 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
3210 heap()->IncrementSemiSpaceCopiedObjectSize(
3211 new_space_visitor_.semispace_copied_size());
3212 heap()->IncrementYoungSurvivorsCounter(
3213 new_space_visitor_.promoted_size() +
3214 new_space_visitor_.semispace_copied_size());
3215 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3216 local_store_buffer_.Process(heap()->store_buffer());
3217 collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
3218}
3219
3220class MarkCompactCollector::CompactionTask : public CancelableTask {
3221 public:
3222 explicit CompactionTask(Heap* heap, Evacuator* evacuator)
3223 : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
3224 evacuator->set_task_id(id());
3225 }
3226
3227 virtual ~CompactionTask() {}
3228
3229 private:
3230 // v8::internal::CancelableTask overrides.
3231 void RunInternal() override {
3232 evacuator_->EvacuatePages();
3233 heap_->mark_compact_collector()
3234 ->pending_compaction_tasks_semaphore_.Signal();
3235 }
3236
3237 Heap* heap_;
3238 Evacuator* evacuator_;
3239
3240 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
3241};
3242
3243int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3244 intptr_t live_bytes) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003245 if (!FLAG_parallel_compaction) return 1;
3246 // Compute the number of needed tasks based on a target compaction time, the
3247 // profiled compaction speed and marked live memory.
3248 //
3249 // The number of parallel compaction tasks is limited by:
3250 // - #evacuation pages
3251 // - (#cores - 1)
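  //
  // Illustrative arithmetic (all numbers hypothetical): with 10 MB of live
  // bytes, a profiled compaction speed of 5 MB/ms and a 1 ms target, we get
  // tasks = 1 + 10 / 5 / 1 = 3; on an 8-core machine the available cores are
  // 8 - kNumSweepingTasks - 1 = 4, so Min(4, Min(pages, 3)) tasks are used.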
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003252 const double kTargetCompactionTimeInMs = 1;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003253 const int kNumSweepingTasks = 3;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003254
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003255 intptr_t compaction_speed =
3256 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003257
Ben Murdoch097c5b22016-05-18 11:27:45 +01003258 const int available_cores =
3259 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
3260 int tasks;
3261 if (compaction_speed > 0) {
3262 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
3263 compaction_speed / kTargetCompactionTimeInMs);
3264 } else {
3265 tasks = pages;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003266 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003267 const int tasks_capped_pages = Min(pages, tasks);
3268 return Min(available_cores, tasks_capped_pages);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003269}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003270
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003271
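// Rough outline of the phases below: (1) collect the candidate pages and
// their live bytes, (2) set up one Evacuator per task, (3) run the tasks
// (the main thread contributes as task 0), (4) merge the thread-local data
// back on the main thread, and (5) post-process the per-page compaction
// states, marking aborted pages for a later re-sweep.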
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003272void MarkCompactCollector::EvacuatePagesInParallel() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003273 int num_pages = 0;
3274 intptr_t live_bytes = 0;
3275 for (Page* page : evacuation_candidates_) {
3276 num_pages++;
3277 live_bytes += page->LiveBytes();
3278 }
3279 for (NewSpacePage* page : newspace_evacuation_candidates_) {
3280 num_pages++;
3281 live_bytes += page->LiveBytes();
3282 }
3283 DCHECK_GE(num_pages, 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003284
3285 // Used for trace summary.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003286 intptr_t compaction_speed = 0;
3287 if (FLAG_trace_fragmentation) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003288 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003289 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003290
3291 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003292
3293 // Set up compaction spaces.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003294 Evacuator** evacuators = new Evacuator*[num_tasks];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003295 for (int i = 0; i < num_tasks; i++) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003296 evacuators[i] = new Evacuator(this, evacuation_candidates_,
3297 newspace_evacuation_candidates_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003298 }
3299
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003300 // Kick off parallel tasks.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003301 StartParallelCompaction(evacuators, num_tasks);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003302 // Wait for unfinished and not-yet-started tasks.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003303 WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003304
Ben Murdoch097c5b22016-05-18 11:27:45 +01003305 // Finalize local evacuators by merging back all locally cached data.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003306 for (int i = 0; i < num_tasks; i++) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003307 evacuators[i]->Finalize();
3308 delete evacuators[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003309 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003310 delete[] evacuators;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003311
Ben Murdoch097c5b22016-05-18 11:27:45 +01003312 // Finalize pages sequentially.
3313 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3314 DCHECK_EQ(p->parallel_compaction_state().Value(),
3315 MemoryChunk::kCompactingFinalize);
3316 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3317 }
3318
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003319 int abandoned_pages = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003320 for (Page* p : evacuation_candidates_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003321 switch (p->parallel_compaction_state().Value()) {
3322 case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3323 // We have partially compacted the page, i.e., some objects may have
3324 // moved, others are still in place.
3325 // We need to:
3326 // - Leave the evacuation candidate flag for later processing of
3327 // slots buffer entries.
3328 // - Leave the slots buffer there for processing of entries added by
3329 // the write barrier.
3330 // - Rescan the page as slot recording in the migration buffer only
3331 // happens upon moving (which we potentially didn't do).
3332 // - Leave the page in the list of pages of a space since we could not
3333 // fully evacuate it.
3334 // - Mark them for rescanning for store buffer entries as we otherwise
3335 // might have stale store buffer entries that become "valid" again
3336 // after reusing the memory. Note that all existing store buffer
3337 // entries of such pages are filtered before rescanning.
3338 DCHECK(p->IsEvacuationCandidate());
3339 p->SetFlag(Page::COMPACTION_WAS_ABORTED);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003340 abandoned_pages++;
3341 break;
3342 case MemoryChunk::kCompactingFinalize:
3343 DCHECK(p->IsEvacuationCandidate());
Ben Murdoch097c5b22016-05-18 11:27:45 +01003344 DCHECK(p->SweepingDone());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003345 p->Unlink();
3346 break;
3347 case MemoryChunk::kCompactingDone:
3348 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3349 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3350 break;
3351 default:
Ben Murdoch097c5b22016-05-18 11:27:45 +01003352 // MemoryChunk::kCompactingInProgress.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003353 UNREACHABLE();
3354 }
3355 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3356 }
3357 if (FLAG_trace_fragmentation) {
3358 PrintIsolate(isolate(),
3359 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3360 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3361 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3362 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3363 num_pages, abandoned_pages, num_tasks,
3364 base::SysInfo::NumberOfProcessors(), live_bytes,
3365 compaction_speed);
3366 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003367}
3368
Ben Murdoch097c5b22016-05-18 11:27:45 +01003369void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
3370 int len) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003371 compaction_in_progress_ = true;
3372 for (int i = 1; i < len; i++) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003373 CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003374 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3375 task, v8::Platform::kShortRunningTask);
3376 }
3377
Ben Murdoch097c5b22016-05-18 11:27:45 +01003378 // Contribute on main thread.
3379 evacuators[0]->EvacuatePages();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003380}
3381
Ben Murdoch097c5b22016-05-18 11:27:45 +01003382void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003383 int len) {
3384 // Try to cancel compaction tasks that have not been run (as they might be
  // stuck in a worker queue). Tasks that cannot be canceled have either
  // already completed or are still running; hence we need to wait for their
3387 // semaphore signal.
3388 for (int i = 0; i < len; i++) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003389 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
3390 evacuators[i]->task_id())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003391 pending_compaction_tasks_semaphore_.Wait();
3392 }
3393 }
3394 compaction_in_progress_ = false;
3395}
3396
3397
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003398class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3399 public:
3400 virtual Object* RetainAs(Object* object) {
3401 if (object->IsHeapObject()) {
3402 HeapObject* heap_object = HeapObject::cast(object);
3403 MapWord map_word = heap_object->map_word();
3404 if (map_word.IsForwardingAddress()) {
3405 return map_word.ToForwardingAddress();
3406 }
3407 }
3408 return object;
3409 }
3410};
3411
3412
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003413enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3414
3415
3416enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3417
3418
3419enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3420
3421
3422template <MarkCompactCollector::SweepingParallelism mode>
3423static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3424 int size) {
3425 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3426 DCHECK(free_list == NULL);
3427 return space->Free(start, size);
3428 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003429 return size - free_list->Free(start, size);
3430 }
3431}
3432
3433
// Sweeps a page. After sweeping, the page can be iterated.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
// Returns the size of the largest contiguous freed memory chunk in bytes.
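// One instantiation used later in this file (see SweepAbortedPages()):
//
//   Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
//         IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);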
3438template <SweepingMode sweeping_mode,
3439 MarkCompactCollector::SweepingParallelism parallelism,
3440 SkipListRebuildingMode skip_list_mode,
3441 FreeSpaceTreatmentMode free_space_mode>
3442static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3443 ObjectVisitor* v) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003444 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003445 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3446 space->identity() == CODE_SPACE);
3447 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3448 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3449 sweeping_mode == SWEEP_ONLY);
3450
3451 Address free_start = p->area_start();
3452 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003453
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003454 // If we use the skip list for code space pages, we have to lock the skip
3455 // list because it could be accessed concurrently by the runtime or the
3456 // deoptimizer.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003457 SkipList* skip_list = p->skip_list();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003458 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3459 skip_list->Clear();
3460 }
3461
3462 intptr_t freed_bytes = 0;
3463 intptr_t max_freed_bytes = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003464 int curr_region = -1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003465
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003466 LiveObjectIterator<kBlackObjects> it(p);
3467 HeapObject* object = NULL;
3468 while ((object = it.Next()) != NULL) {
3469 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3470 Address free_end = object->address();
3471 if (free_end != free_start) {
3472 int size = static_cast<int>(free_end - free_start);
3473 if (free_space_mode == ZAP_FREE_SPACE) {
3474 memset(free_start, 0xcc, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003475 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003476 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3477 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003478 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003479 Map* map = object->synchronized_map();
3480 int size = object->SizeFromMap(map);
3481 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3482 object->IterateBody(map->instance_type(), size, v);
3483 }
3484 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3485 int new_region_start = SkipList::RegionNumber(free_end);
3486 int new_region_end =
3487 SkipList::RegionNumber(free_end + size - kPointerSize);
3488 if (new_region_start != curr_region || new_region_end != curr_region) {
3489 skip_list->AddObject(free_end, size);
3490 curr_region = new_region_end;
3491 }
3492 }
3493 free_start = free_end + size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003494 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003495
3496 // Clear the mark bits of that page and reset live bytes count.
3497 Bitmap::Clear(p);
3498
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003499 if (free_start != p->area_end()) {
3500 int size = static_cast<int>(p->area_end() - free_start);
3501 if (free_space_mode == ZAP_FREE_SPACE) {
3502 memset(free_start, 0xcc, size);
3503 }
3504 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3505 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003506 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01003507 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003508 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3509}
3510
3511
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003512void MarkCompactCollector::InvalidateCode(Code* code) {
3513 if (heap_->incremental_marking()->IsCompacting() &&
3514 !ShouldSkipEvacuationSlotRecording(code)) {
3515 DCHECK(compacting_);
3516
    // If the object is white, then no slots were recorded on it yet.
3518 MarkBit mark_bit = Marking::MarkBitFrom(code);
3519 if (Marking::IsWhite(mark_bit)) return;
3520
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003521 // Ignore all slots that might have been recorded in the body of the
3522 // deoptimized code object. Assumption: no slots will be recorded for
3523 // this object after invalidating it.
3524 RemoveObjectSlots(code->instruction_start(),
3525 code->address() + code->Size());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003526 }
3527}
3528
3529
3530// Return true if the given code is deoptimized or will be deoptimized.
3531bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3532 return code->is_optimized_code() && code->marked_for_deoptimization();
3533}
3534
3535
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003536void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3537 Address end_slot) {
3538 // Remove entries by replacing them with an old-space slot containing a smi
3539 // that is located in an unmovable page.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003540 for (Page* p : evacuation_candidates_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003541 DCHECK(p->IsEvacuationCandidate() ||
3542 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3543 if (p->IsEvacuationCandidate()) {
3544 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3545 end_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003546 }
3547 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003548}
3549
3550
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003551#ifdef VERIFY_HEAP
3552static void VerifyAllBlackObjects(MemoryChunk* page) {
3553 LiveObjectIterator<kAllLiveObjects> it(page);
3554 HeapObject* object = NULL;
3555 while ((object = it.Next()) != NULL) {
3556 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3557 }
3558}
3559#endif // VERIFY_HEAP
3560
3561
3562bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
3563 HeapObjectVisitor* visitor,
3564 IterationMode mode) {
3565#ifdef VERIFY_HEAP
3566 VerifyAllBlackObjects(page);
3567#endif // VERIFY_HEAP
3568
3569 LiveObjectIterator<kBlackObjects> it(page);
3570 HeapObject* object = nullptr;
3571 while ((object = it.Next()) != nullptr) {
3572 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3573 if (!visitor->Visit(object)) {
3574 if (mode == kClearMarkbits) {
3575 page->markbits()->ClearRange(
3576 page->AddressToMarkbitIndex(page->area_start()),
3577 page->AddressToMarkbitIndex(object->address()));
Ben Murdoch097c5b22016-05-18 11:27:45 +01003578 if (page->old_to_new_slots() != nullptr) {
3579 page->old_to_new_slots()->RemoveRange(
3580 0, static_cast<int>(object->address() - page->address()));
3581 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003582 RecomputeLiveBytes(page);
3583 }
3584 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003585 }
3586 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003587 if (mode == kClearMarkbits) {
3588 Bitmap::Clear(page);
3589 }
3590 return true;
3591}
3592
3593
3594void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3595 LiveObjectIterator<kBlackObjects> it(page);
3596 int new_live_size = 0;
3597 HeapObject* object = nullptr;
3598 while ((object = it.Next()) != nullptr) {
3599 new_live_size += object->Size();
3600 }
3601 page->SetLiveBytes(new_live_size);
3602}
3603
3604
3605void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3606 ObjectVisitor* visitor) {
3607#ifdef VERIFY_HEAP
3608 VerifyAllBlackObjects(page);
3609#endif // VERIFY_HEAP
3610
3611 LiveObjectIterator<kBlackObjects> it(page);
3612 HeapObject* object = NULL;
3613 while ((object = it.Next()) != NULL) {
3614 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3615 Map* map = object->synchronized_map();
3616 int size = object->SizeFromMap(map);
3617 object->IterateBody(map->instance_type(), size, visitor);
3618 }
3619}
3620
3621
3622void MarkCompactCollector::SweepAbortedPages() {
3623 // Second pass on aborted pages.
Ben Murdoch097c5b22016-05-18 11:27:45 +01003624 for (Page* p : evacuation_candidates_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003625 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3626 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003627 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003628 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3629 switch (space->identity()) {
3630 case OLD_SPACE:
3631 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3632 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3633 break;
3634 case CODE_SPACE:
3635 if (FLAG_zap_code_space) {
3636 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3637 ZAP_FREE_SPACE>(space, NULL, p, nullptr);
3638 } else {
3639 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3640 IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
3641 }
3642 break;
3643 default:
3644 UNREACHABLE();
3645 break;
3646 }
3647 }
3648 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003649}
3650
3651
3652void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003653 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003654 Heap::RelocationLock relocation_lock(heap());
3655
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003656 {
3657 GCTracer::Scope gc_scope(heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003658 GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3659 EvacuationScope evacuation_scope(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003660
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003661 EvacuateNewSpacePrologue();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003662 EvacuatePagesInParallel();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003663 EvacuateNewSpaceEpilogue();
3664 heap()->new_space()->set_age_mark(heap()->new_space()->top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003665 }
3666
3667 UpdatePointersAfterEvacuation();
3668
Ben Murdoch097c5b22016-05-18 11:27:45 +01003669 // Give pages that are queued to be freed back to the OS. Note that filtering
3670 // slots only handles old space (for unboxed doubles), and thus map space can
3671 // still contain stale pointers. We only free the chunks after pointer updates
3672 // to still have access to page headers.
3673 heap()->FreeQueuedChunks();
3674
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003675 {
3676 GCTracer::Scope gc_scope(heap()->tracer(),
3677 GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3678 // After updating all pointers, we can finally sweep the aborted pages,
3679 // effectively overriding any forward pointers.
3680 SweepAbortedPages();
3681
3682 // EvacuateNewSpaceAndCandidates iterates over new space objects and for
3683 // ArrayBuffers either re-registers them as live or promotes them. This is
3684 // needed to properly free them.
3685 heap()->array_buffer_tracker()->FreeDead(false);
3686
3687 // Deallocate evacuated candidate pages.
3688 ReleaseEvacuationCandidates();
3689 }
3690
3691#ifdef VERIFY_HEAP
3692 if (FLAG_verify_heap && !sweeping_in_progress_) {
3693 VerifyEvacuation(heap());
3694 }
3695#endif
3696}
3697
3698
3699void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3700 GCTracer::Scope gc_scope(heap()->tracer(),
3701 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3702 {
3703 GCTracer::Scope gc_scope(
3704 heap()->tracer(),
3705 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3706 UpdateSlotsRecordedIn(migration_slots_buffer_);
3707 if (FLAG_trace_fragmentation_verbose) {
3708 PrintF(" migration slots buffer: %d\n",
3709 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3710 }
3711 slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
3712 DCHECK(migration_slots_buffer_ == NULL);
3713
3714 // TODO(hpayer): Process the slots buffers in parallel. This has to be done
3715 // after evacuation of all pages finishes.
3716 int buffers = evacuation_slots_buffers_.length();
3717 for (int i = 0; i < buffers; i++) {
3718 SlotsBuffer* buffer = evacuation_slots_buffers_[i];
3719 UpdateSlotsRecordedIn(buffer);
3720 slots_buffer_allocator_->DeallocateChain(&buffer);
3721 }
3722 evacuation_slots_buffers_.Rewind(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003723 }
3724
3725 // Second pass: find pointers to new space and update them.
3726 PointersUpdatingVisitor updating_visitor(heap());
3727
3728 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003729 GCTracer::Scope gc_scope(
3730 heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003731 // Update pointers in to space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003732 SemiSpaceIterator to_it(heap()->new_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003733 for (HeapObject* object = to_it.Next(); object != NULL;
3734 object = to_it.Next()) {
3735 Map* map = object->map();
3736 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3737 &updating_visitor);
3738 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003739 // Update roots.
3740 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003741
Ben Murdoch097c5b22016-05-18 11:27:45 +01003742 RememberedSet<OLD_TO_NEW>::IterateWithWrapper(heap_, UpdatePointer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003743 }
3744
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003745 {
3746 GCTracer::Scope gc_scope(
3747 heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003748 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003749 for (Page* p : evacuation_candidates_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003750 DCHECK(p->IsEvacuationCandidate() ||
3751 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3752
3753 if (p->IsEvacuationCandidate()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003754 UpdateSlotsRecordedIn(p->slots_buffer());
3755 if (FLAG_trace_fragmentation_verbose) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003756 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3757 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3758 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003759 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003760
        // Important: the skip list should be cleared only after roots were
        // updated because root iteration traverses the stack and might have to
        // find code objects from a non-updated pc pointing into an evacuation
        // candidate.
3764 SkipList* list = p->skip_list();
3765 if (list != NULL) list->Clear();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003766
3767 // First pass on aborted pages, fixing up all live objects.
3768 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3769 p->ClearEvacuationCandidate();
3770 VisitLiveObjectsBody(p, &updating_visitor);
3771 }
3772 }
3773
3774 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003775 if (FLAG_gc_verbose) {
3776 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3777 reinterpret_cast<intptr_t>(p));
3778 }
3779 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3780 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003781 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003782
3783 switch (space->identity()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003784 case OLD_SPACE:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003785 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3786 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3787 &updating_visitor);
3788 break;
3789 case CODE_SPACE:
3790 if (FLAG_zap_code_space) {
3791 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3792 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
3793 &updating_visitor);
3794 } else {
3795 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3796 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3797 &updating_visitor);
3798 }
3799 break;
3800 default:
3801 UNREACHABLE();
3802 break;
3803 }
3804 }
3805 }
3806 }
3807
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003808 {
3809 GCTracer::Scope gc_scope(heap()->tracer(),
3810 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3811 heap_->string_table()->Iterate(&updating_visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003812
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003813 // Update pointers from external string table.
3814 heap_->UpdateReferencesInExternalStringTable(
3815 &UpdateReferenceInExternalStringTableEntry);
3816
3817 EvacuationWeakObjectRetainer evacuation_object_retainer;
3818 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003819 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003820}
3821
3822
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003823void MarkCompactCollector::ReleaseEvacuationCandidates() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01003824 for (Page* p : evacuation_candidates_) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003825 if (!p->IsEvacuationCandidate()) continue;
3826 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3827 space->Free(p->area_start(), p->area_size());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003828 p->ResetLiveBytes();
Ben Murdoch097c5b22016-05-18 11:27:45 +01003829 CHECK(p->SweepingDone());
3830 space->ReleasePage(p, true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003831 }
3832 evacuation_candidates_.Rewind(0);
3833 compacting_ = false;
3834 heap()->FreeQueuedChunks();
3835}
3836
3837
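// A hedged summary of the helper below: it walks |space|'s sweeping list,
// returns early once a single page yields at least |required_freed_bytes|
// (if that value is positive), and stops after |max_pages| pages (if that
// value is positive). The return value is the largest freed block seen, as
// reported by Sweep().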
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003838int MarkCompactCollector::SweepInParallel(PagedSpace* space,
Ben Murdoch097c5b22016-05-18 11:27:45 +01003839 int required_freed_bytes,
3840 int max_pages) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003841 int max_freed = 0;
3842 int max_freed_overall = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01003843 int page_count = 0;
3844 for (Page* p : sweeping_list(space)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003845 max_freed = SweepInParallel(p, space);
3846 DCHECK(max_freed >= 0);
3847 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
3848 return max_freed;
3849 }
3850 max_freed_overall = Max(max_freed, max_freed_overall);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003851 page_count++;
3852 if (max_pages > 0 && page_count >= max_pages) {
3853 break;
3854 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003855 }
3856 return max_freed_overall;
3857}
3858
3859
int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
  int max_freed = 0;
  if (page->mutex()->TryLock()) {
    // If this page was already swept in the meantime, we can return here.
    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
      page->mutex()->Unlock();
      return 0;
    }
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    FreeList* free_list;
    FreeList private_free_list(space);
    if (space->identity() == OLD_SPACE) {
      free_list = free_list_old_space_.get();
      max_freed =
          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
    } else if (space->identity() == CODE_SPACE) {
      free_list = free_list_code_space_.get();
      max_freed =
          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
    } else {
      free_list = free_list_map_space_.get();
      max_freed =
          Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
                IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
    }
    free_list->Concatenate(&private_free_list);
    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
    page->mutex()->Unlock();
  }
  return max_freed;
}


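// Prepares |space| for sweeping: pages that are evacuation candidates or
// marked for rescan are skipped, fully unused pages (except one) are released
// immediately, NEVER_ALLOCATE_ON_PAGE pages are swept eagerly on the main
// thread, and all remaining pages are queued in the space's sweeping list,
// sorted by ascending live bytes.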
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  PageIterator it(space);

  int will_be_swept = 0;
  bool unused_page_present = false;

  while (it.has_next()) {
    Page* p = it.next();
    DCHECK(p->SweepingDone());

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
        p->IsEvacuationCandidate()) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      DCHECK(evacuation_candidates_.length() > 0);
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again. Note
      // that this adds unusable memory into the free list that is dropped from
      // the free list again later. Since we only use the flag for testing,
      // this is fine.
      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
      Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
            IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
      continue;
    }

    // One unused page is kept; all further unused pages are released before
    // sweeping them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p",
                       static_cast<void*>(p));
        }
        space->ReleasePage(p, false);
        continue;
      }
      unused_page_present = true;
    }

    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
    sweeping_list(space).push_back(p);
    int to_sweep = p->area_size() - p->LiveBytes();
    space->accounting_stats_.ShrinkSpace(to_sweep);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 AllocationSpaceName(space->identity()), will_be_swept);
  }
  std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
            [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
}


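// Entry point for the sweeping phase: queues the old, code and map spaces for
// sweeping, optionally kicks off concurrent sweeper tasks, and frees unmarked
// large objects.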
void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = heap_->MonotonicallyIncreasingTimeInMs();
  }

#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    sweeping_in_progress_ = true;
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    if (FLAG_concurrent_sweeping) {
      StartSweeperThreads();
    }
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  if (FLAG_print_cumulative_gc_stat) {
    heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
                                     start_time);
  }
}


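// Clears the per-space sweeping lists once sweeping of all spaces is
// complete.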
void MarkCompactCollector::ParallelSweepSpacesComplete() {
  sweeping_list(heap()->old_space()).clear();
  sweeping_list(heap()->code_space()).clear();
  sweeping_list(heap()->map_space()).clear();
}


// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects. We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }


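// One-time static initialization for the mark-compact and incremental
// marking visitors.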
void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}


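// Gives up compacting |page| when its slots buffer overflows: the page stops
// being an evacuation candidate and is instead rescanned after evacuation so
// that all pointers to evacuated objects still get updated.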
void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
  if (FLAG_trace_fragmentation) {
    PrintF("Page %p is too popular. Disabling evacuation.\n",
           reinterpret_cast<void*>(page));
  }

  isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);

  // TODO(gc) If all evacuation candidates are too popular we
  // should stop slots recording entirely.
  page->ClearEvacuationCandidate();

  DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
  page->SetFlag(Page::POPULAR_PAGE);

  // We were not collecting slots on this page that point to other evacuation
  // candidates, thus we have to rescan the page after evacuation to discover
  // and update all pointers to evacuated objects.
  page->SetFlag(Page::RESCAN_ON_EVACUATION);
}


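// Records a code-entry slot that points into an evacuation candidate so it
// can be updated after evacuation; if the candidate's slots buffer overflows,
// the candidate is evicted instead.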
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
                                               Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(object)) {
    if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictPopularEvacuationCandidate(target_page);
    }
  }
}


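// Records the target of a patched code object (found via the inner pointer
// |pc|) as a relocation slot, but only while compacting and only if the host
// code object is marked black (live).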
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8