// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/sys-info.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/gdb-jit.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/slots-buffer.h"
#include "src/heap/spaces-inl.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";


// The following has to hold in order for {Marking::MarkBitFrom} to not produce
// invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
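// For illustration (assuming the patterns above): every heap object covers at
// least two words, so it owns at least two consecutive bits in the marking
// bitmap. An object therefore moves 00 (white) -> 10 (grey) -> 11 (black)
// entirely within its own bit pair, and the pattern 01 could only show up if
// the mark bits of neighboring objects were to overlap, which the assert above
// rules out.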


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      marking_parity_(ODD_MARKING_PARITY),
      was_marked_incrementally_(false),
      evacuation_(false),
      slots_buffer_allocator_(nullptr),
      migration_slots_buffer_(nullptr),
      heap_(heap),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(0),
      code_flusher_(nullptr),
      have_code_to_deoptimize_(false),
      compacting_(false),
      sweeping_in_progress_(false),
      compaction_in_progress_(false),
      pending_sweeper_tasks_semaphore_(0),
      pending_compaction_tasks_semaphore_(0) {
}

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor : public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(&p);
    }
  }

  void VisitCell(RelocInfo* rinfo) override {
    Code* code = rinfo->host();
    DCHECK(rinfo->rmode() == RelocInfo::CELL);
    if (!code->IsWeakObject(rinfo->target_cell())) {
      ObjectVisitor::VisitCell(rinfo);
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom; current < top; current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
      // The next word for sure belongs to the current object, jump over it.
      current += kPointerSize;
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) override {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Page* page) {
  VerifyEvacuationVisitor visitor;
  HeapObjectIterator iterator(page);
  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
       heap_object = iterator.Next()) {
    // We skip free space objects.
    if (!heap_object->IsFiller()) {
      heap_object->Iterate(&visitor);
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
  if (FLAG_use_allocation_folding && (space == heap->old_space())) {
    return;
  }
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p);
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap, heap->old_space());
  VerifyEvacuation(heap, heap->code_space());
  VerifyEvacuation(heap, heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


void MarkCompactCollector::SetUp() {
  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
  DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  free_list_old_space_.Reset(new FreeList(heap_->old_space()));
  free_list_code_space_.Reset(new FreeList(heap_->code_space()));
  free_list_map_space_.Reset(new FreeList(heap_->map_space()));
  EnsureMarkingDequeIsReserved();
  EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
  slots_buffer_allocator_ = new SlotsBufferAllocator();

  if (FLAG_flush_code) {
    code_flusher_ = new CodeFlusher(isolate());
    if (FLAG_trace_code_flushing) {
      PrintF("[code-flushing is now on]\n");
    }
  }
}


void MarkCompactCollector::TearDown() {
  AbortCompaction();
  delete marking_deque_memory_;
  delete slots_buffer_allocator_;
  delete code_flusher_;
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
    heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
  {
    GCTracer::Scope gc_scope(heap()->tracer(),
                             GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
    heap_->store_buffer()->ClearInvalidStoreBufferEntries();
  }

  {
    GCTracer::Scope gc_scope(heap()->tracer(),
                             GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
    int number_of_pages = evacuation_candidates_.length();
    for (int i = 0; i < number_of_pages; i++) {
      Page* p = evacuation_candidates_[i];
      SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
    }
  }
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyValidStoreAndSlotsBufferEntries();
  }
#endif
}


#ifdef VERIFY_HEAP
static void VerifyValidSlotsBufferEntries(Heap* heap, PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SlotsBuffer::VerifySlots(heap, p->slots_buffer());
  }
}


void MarkCompactCollector::VerifyValidStoreAndSlotsBufferEntries() {
  heap()->store_buffer()->VerifyValidStoreBufferEntries();

  VerifyValidSlotsBufferEntries(heap(), heap()->old_space());
  VerifyValidSlotsBufferEntries(heap(), heap()->code_space());
  VerifyValidSlotsBufferEntries(heap(), heap()->map_space());

  LargeObjectIterator it(heap()->lo_space());
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    SlotsBuffer::VerifySlots(heap(), chunk->slots_buffer());
  }
}
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();

  DCHECK(heap_->incremental_marking()->IsStopped());

  ClearNonLiveReferences();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  EvacuateNewSpaceAndCandidates();

  Finish();
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (!code->is_optimized_code()) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Marking::MarkWhite(Marking::MarkBitFrom(obj));
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}


class MarkCompactCollector::CompactionTask : public CancelableTask {
 public:
  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
      : CancelableTask(heap->isolate()), spaces_(spaces) {}

  virtual ~CompactionTask() {}

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    MarkCompactCollector* mark_compact =
        isolate()->heap()->mark_compact_collector();
    SlotsBuffer* evacuation_slots_buffer = nullptr;
    mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
    mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
    mark_compact->pending_compaction_tasks_semaphore_.Signal();
  }

  CompactionSpaceCollection* spaces_;

  DISALLOW_COPY_AND_ASSIGN(CompactionTask);
};


class MarkCompactCollector::SweeperTask : public v8::Task {
 public:
  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}

  virtual ~SweeperTask() {}

 private:
  // v8::Task overrides.
  void Run() override {
    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
    heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal();
  }

  Heap* heap_;
  PagedSpace* space_;

  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
};


void MarkCompactCollector::StartSweeperThreads() {
  DCHECK(free_list_old_space_.get()->IsEmpty());
  DCHECK(free_list_code_space_.get()->IsEmpty());
  DCHECK(free_list_map_space_.get()->IsEmpty());
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), heap()->old_space()),
      v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), heap()->code_space()),
      v8::Platform::kShortRunningTask);
  V8::GetCurrentPlatform()->CallOnBackgroundThread(
      new SweeperTask(heap(), heap()->map_space()),
      v8::Platform::kShortRunningTask);
}


void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
  PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
  if (!page->SweepingCompleted()) {
    SweepInParallel(page, owner);
    if (!page->SweepingCompleted()) {
      // We were not able to sweep that page, i.e., a concurrent
      // sweeper thread currently owns this page. Wait for the sweeper
      // thread to be done with this page.
      page->WaitUntilSweepingCompleted();
    }
  }
}


void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
  if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(space->identity()), 0);
    space->RefillFreeList();
  }
}


void MarkCompactCollector::EnsureSweepingCompleted() {
  DCHECK(sweeping_in_progress_ == true);

  // If sweeping is not completed or not running at all, we try to complete it
  // here.
  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
    SweepInParallel(heap()->paged_space(OLD_SPACE), 0);
    SweepInParallel(heap()->paged_space(CODE_SPACE), 0);
    SweepInParallel(heap()->paged_space(MAP_SPACE), 0);
  }

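  // StartSweeperThreads() posts exactly one SweeperTask per swept space (old,
  // code and map space), so three signals are expected before sweeping can be
  // declared finished.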
  if (FLAG_concurrent_sweeping) {
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
    pending_sweeper_tasks_semaphore_.Wait();
  }

  ParallelSweepSpacesComplete();
  sweeping_in_progress_ = false;
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    VerifyEvacuation(heap_);
  }
#endif
}


bool MarkCompactCollector::IsSweepingCompleted() {
  if (!pending_sweeper_tasks_semaphore_.WaitFor(
          base::TimeDelta::FromSeconds(0))) {
    return false;
  }
  pending_sweeper_tasks_semaphore_.Signal();
  return true;
}


void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::MarkBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    heap->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE:
      return "NEW_SPACE";
    case OLD_SPACE:
      return "OLD_SPACE";
    case CODE_SPACE:
      return "CODE_SPACE";
    case MAP_SPACE:
      return "MAP_SPACE";
    case LO_SPACE:
      return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


void MarkCompactCollector::ComputeEvacuationHeuristics(
    int area_size, int* target_fragmentation_percent,
    int* max_evacuated_bytes) {
  // For memory reducing mode we directly define both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const int kTargetMsPerArea = 1;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else {
    const intptr_t estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const intptr_t estimated_ms_per_area =
          1 + static_cast<intptr_t>(area_size) / estimated_compaction_speed;
      *target_fragmentation_percent =
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area;
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  int area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<int, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    if (p->NeverEvacuate()) continue;
    if (p->IsFlagSet(Page::POPULAR_PAGE)) {
      // This page had a slots buffer overflow on the previous GC, skip it.
      p->ClearFlag(Page::POPULAR_PAGE);
      continue;
    }
    // Invariant: Evacuation candidates are just created when marking is
    // started. At the end of a GC all evacuation candidates are cleared and
    // their slot buffers are released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK(p->slots_buffer() == NULL);
    DCHECK(p->area_size() == area_size);
    int live_bytes =
        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
    pages.push_back(std::make_pair(live_bytes, p));
  }

  int candidate_count = 0;
  int total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // We use two conditions to decide whether a page qualifies as an evacuation
    // candidate, or not:
    // * Target fragmentation: How fragmented is a page, i.e., the ratio
    //   between live bytes and capacity of this page (= area).
    // * Evacuation quota: A global quota determining how many bytes should be
    //   compacted.
    //
    // The algorithm sorts all pages by live bytes and then iterates through
    // them starting with the page with the most free memory, adding them to the
    // set of evacuation candidates as long as both conditions (fragmentation
    // and quota) hold.
    int max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const intptr_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of (n+1)-th page does not exceed the specified limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      int live_bytes = pages[i].first;
      int free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%d "
                     "fragmentation_limit_kb=%d fragmentation_limit_percent=%d "
                     "sum_compaction_kb=%d "
                     "compaction_limit_kb=%d\n",
                     AllocationSpaceName(space->identity()), free_bytes / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%d\n",
                 AllocationSpaceName(space->identity()), reduce_memory,
                 candidate_count, total_live_bytes / KB);
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
  }
  DCHECK_EQ(0, evacuation_candidates_.length());
}
847
848void MarkCompactCollector::Prepare() {
849 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
850
851#ifdef DEBUG
852 DCHECK(state_ == IDLE);
853 state_ = PREPARE_GC;
854#endif
855
856 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
857
858 if (sweeping_in_progress()) {
859 // Instead of waiting we could also abort the sweeper threads here.
860 EnsureSweepingCompleted();
861 }
862
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000863 // If concurrent unmapping tasks are still running, we should wait for
864 // them here.
865 heap()->WaitUntilUnmappingOfFreeChunksCompleted();
866
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000867 // Clear marking bits if incremental marking is aborted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000868 if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
869 heap()->incremental_marking()->Stop();
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000870 ClearMarkbits();
871 AbortWeakCollections();
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400872 AbortWeakCells();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000873 AbortTransitionArrays();
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000874 AbortCompaction();
875 was_marked_incrementally_ = false;
876 }
877
878 // Don't start compaction if we are in the middle of incremental
879 // marking cycle. We did not collect any slots.
880 if (!FLAG_never_compact && !was_marked_incrementally_) {
881 StartCompaction(NON_INCREMENTAL_COMPACTION);
882 }
883
884 PagedSpaces spaces(heap());
885 for (PagedSpace* space = spaces.next(); space != NULL;
886 space = spaces.next()) {
887 space->PrepareForMarkCompact();
888 }
889
890#ifdef VERIFY_HEAP
891 if (!was_marked_incrementally_ && FLAG_verify_heap) {
892 VerifyMarkbitsAreClean();
893 }
894#endif
895}
896
897
898void MarkCompactCollector::Finish() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000899 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_FINISH);
900
901 // The hashing of weak_object_to_code_table is no longer valid.
902 heap()->weak_object_to_code_table()->Rehash(
903 heap()->isolate()->factory()->undefined_value());
904
905 // Clear the marking state of live large objects.
906 heap_->lo_space()->ClearMarkingStateOfLiveObjects();
907
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000908#ifdef DEBUG
909 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
910 state_ = IDLE;
911#endif
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000912 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
913
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000914 // The stub cache is not traversed during GC; clear the cache to
915 // force lazy re-initialization of it. This must be done after the
916 // GC, because it relies on the new address of certain old space
917 // objects (empty string, illegal builtin).
918 isolate()->stub_cache()->Clear();
919
920 if (have_code_to_deoptimize_) {
921 // Some code objects were marked for deoptimization during the GC.
922 Deoptimizer::DeoptimizeMarkedCode(isolate());
923 have_code_to_deoptimize_ = false;
924 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400925
926 heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000927
928 if (marking_parity_ == EVEN_MARKING_PARITY) {
929 marking_parity_ = ODD_MARKING_PARITY;
930 } else {
931 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
932 marking_parity_ = EVEN_MARKING_PARITY;
933 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000934}
935

// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
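//
// Roughly, as a sketch of the driver loop (not the literal code below):
//
//   MarkRoots();                           // may overflow the marking deque
//   EmptyMarkingDeque();                   // drain the deque, marking children
//   while (marking deque overflowed) {
//     RefillMarkingDeque();                // rescan heap for overflowed objects
//     EmptyMarkingDeque();
//   }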

void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!shared->OptimizedCodeMapIsCleared()) {
        shared->ClearOptimizedCodeMap();
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      DCHECK(Marking::IsBlack(code_mark));
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
        candidate, slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(
        shared, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(code_mark)) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      // Always flush the optimized code map if there is one.
      if (!candidate->OptimizedCodeMapIsCleared()) {
        candidate->ClearOptimizedCodeMap();
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
                                                           *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  DCHECK(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons closure: ");
    function->shared()->ShortPrint();
    PrintF("]\n");
  }

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), object, p);
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, object, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, object, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (Marking::IsWhite(mark_bit)) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         HeapObject* object, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* target_object = HeapObject::cast(*p);
    collector->RecordSlot(object, p, target_object);
    MarkBit mark = Marking::MarkBitFrom(target_object);
    collector->MarkObject(target_object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    DCHECK(collector->heap()->Contains(obj));
    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
                                          Object** start, Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(object, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (Marking::IsBlackOrGrey(mark)) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

 private:
  template <int id>
  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
                                          bool is_one_byte) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE)
      return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTag() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if JSRegExp
      // data was marked through the compilation cache before the marker
      // reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot =
          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
      heap->mark_compact_collector()->RecordSlot(data, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAt(JSRegExp::code_index(is_one_byte),
                    Smi::FromInt(heap->ms_count() & 0xff));
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAt(JSRegExp::code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
                      Smi::FromInt(JSRegExp::kUninitializedValue));
      }
    }
  }


  // Works by setting the current sweep_generation (as a smi) in the
  // code object slot in the data array of the RegExp, keeping a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
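  // For example (illustrative numbers): if the code was aged at
  // ms_count() == 12, the Smi 12 is stored in the code slot; the saved code
  // is then flushed by the GC whose ms_count() satisfies
  // (ms_count() - kRegExpCodeThreshold) & 0xff == 12, i.e. five mark-sweep
  // GCs later, unless the RegExp was used (and the code reinstated) in the
  // meantime.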
1287 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1288 Heap* heap = map->GetHeap();
1289 MarkCompactCollector* collector = heap->mark_compact_collector();
1290 if (!collector->is_code_flushing_enabled()) {
1291 VisitJSRegExp(map, object);
1292 return;
1293 }
1294 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1295 // Flush code or set age on both one byte and two byte code.
1296 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1297 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1298 // Visit the fields of the RegExp, including the updated FixedArray.
1299 VisitJSRegExp(map, object);
1300 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001301};
1302
1303
1304void MarkCompactMarkingVisitor::Initialize() {
1305 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1306
1307 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1308
1309 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001310 ObjectStatsVisitor::Initialize(&table_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001311 }
1312}
1313
1314
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001315class CodeMarkingVisitor : public ThreadVisitor {
1316 public:
1317 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1318 : collector_(collector) {}
1319
1320 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1321 collector_->PrepareThreadForCodeFlushing(isolate, top);
1322 }
1323
1324 private:
1325 MarkCompactCollector* collector_;
1326};
1327
1328
1329class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1330 public:
1331 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1332 : collector_(collector) {}
1333
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001334 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001335 for (Object** p = start; p < end; p++) VisitPointer(p);
1336 }
1337
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001338 void VisitPointer(Object** slot) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001339 Object* obj = *slot;
1340 if (obj->IsSharedFunctionInfo()) {
1341 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1342 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1343 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1344 collector_->MarkObject(shared->code(), code_mark);
1345 collector_->MarkObject(shared, shared_mark);
1346 }
1347 }
1348
1349 private:
1350 MarkCompactCollector* collector_;
1351};
1352
1353
1354void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1355 ThreadLocalTop* top) {
1356 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1357 // Note: for the frame that has a pending lazy deoptimization
1358 // StackFrame::unchecked_code will return a non-optimized code object for
1359 // the outermost function and StackFrame::LookupCode will return
1360 // actual optimized code object.
1361 StackFrame* frame = it.frame();
1362 Code* code = frame->unchecked_code();
1363 MarkBit code_mark = Marking::MarkBitFrom(code);
1364 MarkObject(code, code_mark);
1365 if (frame->is_optimized()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001366 Code* optimized_code = frame->LookupCode();
1367 MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1368 MarkObject(optimized_code, optimized_code_mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001369 }
1370 }
1371}
1372
1373
1374void MarkCompactCollector::PrepareForCodeFlushing() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001375 // If code flushing is disabled, there is no need to prepare for it.
1376 if (!is_code_flushing_enabled()) return;
1377
1378 // Ensure that the empty descriptor array is marked. MarkDescriptorArray
1379 // relies on it being marked before any other descriptor array.
1380 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1381 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1382 MarkObject(descriptor_array, descriptor_array_mark);
1383
1384 // Make sure we are not referencing the code from the stack.
1385 DCHECK(this == heap()->mark_compact_collector());
1386 PrepareThreadForCodeFlushing(heap()->isolate(),
1387 heap()->isolate()->thread_local_top());
1388
1389 // Iterate the archived stacks in all threads to check if
1390 // the code is referenced.
1391 CodeMarkingVisitor code_marking_visitor(this);
1392 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1393 &code_marking_visitor);
1394
1395 SharedFunctionInfoMarkingVisitor visitor(this);
1396 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1397 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1398
1399 ProcessMarkingDeque();
1400}
1401
1402
1403// Visitor class for marking heap roots.
1404class RootMarkingVisitor : public ObjectVisitor {
1405 public:
1406 explicit RootMarkingVisitor(Heap* heap)
1407 : collector_(heap->mark_compact_collector()) {}
1408
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001409 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001410
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001411 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001412 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1413 }
1414
1415 // Skip the weak next code link in a code object, which is visited in
1416 // ProcessTopOptimizedFrame.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001417 void VisitNextCodeLink(Object** p) override {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001418
1419 private:
1420 void MarkObjectByPointer(Object** p) {
1421 if (!(*p)->IsHeapObject()) return;
1422
1423 // The slot holds a heap object; mark it and everything reachable from it.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001424 HeapObject* object = HeapObject::cast(*p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001425 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001426 if (Marking::IsBlackOrGrey(mark_bit)) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001427
1428 Map* map = object->map();
1429 // Mark the object.
1430 collector_->SetMark(object, mark_bit);
1431
1432 // Mark the map pointer and body, and push them on the marking stack.
1433 MarkBit map_mark = Marking::MarkBitFrom(map);
1434 collector_->MarkObject(map, map_mark);
1435 MarkCompactMarkingVisitor::IterateBody(map, object);
1436
1437 // Mark all the objects reachable from the map and body. May leave
1438 // overflowed objects in the heap.
1439 collector_->EmptyMarkingDeque();
1440 }
1441
1442 MarkCompactCollector* collector_;
1443};
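
// Illustrative sketch, not part of V8: it models the per-slot filtering that
// RootMarkingVisitor performs above, namely skipping values that are not heap
// objects (e.g. Smis) and objects that are already marked, and otherwise
// marking the object and queueing it so its body can be scanned. SketchObject,
// SketchMarkingDeque and the colors are invented stand-ins for HeapObject,
// MarkingDeque and the mark bitmap.
namespace root_marking_sketch {

enum SketchColor { kSketchWhite, kSketchGrey, kSketchBlack };

struct SketchObject {
  bool is_heap_object = true;  // False models a Smi stored in the slot.
  SketchColor color = kSketchWhite;
};

struct SketchMarkingDeque {
  static const int kCapacity = 64;
  SketchObject* entries[kCapacity];
  int size = 0;
  void Push(SketchObject* object) {
    if (size < kCapacity) entries[size++] = object;
  }
};

// Analogous to RootMarkingVisitor::MarkObjectByPointer: cheap filters first,
// then mark and queue. The real visitor additionally marks the object's map
// and drains the deque eagerly (EmptyMarkingDeque) before the next root.
void MarkRootSlot(SketchMarkingDeque* deque, SketchObject** slot) {
  SketchObject* object = *slot;
  if (object == nullptr || !object->is_heap_object) return;
  if (object->color != kSketchWhite) return;  // Already visited.
  object->color = kSketchGrey;
  deque->Push(object);
}

}  // namespace root_marking_sketch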
1444
1445
1446// Helper class for pruning the string table.
1447template <bool finalize_external_strings>
1448class StringTableCleaner : public ObjectVisitor {
1449 public:
1450 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1451
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001452 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001453 // Visit all HeapObject pointers in [start, end).
1454 for (Object** p = start; p < end; p++) {
1455 Object* o = *p;
1456 if (o->IsHeapObject() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001457 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001458 if (finalize_external_strings) {
1459 DCHECK(o->IsExternalString());
1460 heap_->FinalizeExternalString(String::cast(*p));
1461 } else {
1462 pointers_removed_++;
1463 }
1464 // Set the entry to the_hole_value (as deleted).
1465 *p = heap_->the_hole_value();
1466 }
1467 }
1468 }
1469
1470 int PointersRemoved() {
1471 DCHECK(!finalize_external_strings);
1472 return pointers_removed_;
1473 }
1474
1475 private:
1476 Heap* heap_;
1477 int pointers_removed_;
1478};
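
// Illustrative sketch, not part of V8: it mirrors what StringTableCleaner does
// above, walking a table's element slots and, for every entry whose referent
// did not survive marking, overwriting the slot with a distinguished "hole"
// sentinel while counting the removals so the element count can be adjusted
// afterwards. SketchEntry and sketch_the_hole are invented stand-ins for the
// table entries and heap->the_hole_value().
namespace table_cleaning_sketch {

struct SketchEntry {
  bool is_marked = false;
};

// A unique sentinel address standing in for the hole value.
static SketchEntry sketch_the_hole;

int ClearDeadEntries(SketchEntry** entries, int length) {
  int removed = 0;
  for (int i = 0; i < length; i++) {
    SketchEntry* entry = entries[i];
    if (entry == nullptr || entry == &sketch_the_hole) continue;
    if (!entry->is_marked) {
      entries[i] = &sketch_the_hole;  // Mark the slot as deleted.
      removed++;
    }
  }
  return removed;  // The caller shrinks the table's element count by this much.
}

}  // namespace table_cleaning_sketch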
1479
1480
1481typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1482typedef StringTableCleaner<true> ExternalStringTableCleaner;
1483
1484
1485// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1486// are retained.
1487class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1488 public:
1489 virtual Object* RetainAs(Object* object) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001490 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1491 DCHECK(!Marking::IsGrey(mark_bit));
1492 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001493 return object;
1494 } else if (object->IsAllocationSite() &&
1495 !(AllocationSite::cast(object)->IsZombie())) {
1496 // "dead" AllocationSites need to live long enough for a traversal of new
1497 // space. These sites get a one-time reprieve.
1498 AllocationSite* site = AllocationSite::cast(object);
1499 site->MarkZombie();
1500 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1501 return object;
1502 } else {
1503 return NULL;
1504 }
1505 }
1506};
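
// Illustrative sketch, not part of V8: it captures the retention policy of
// MarkCompactWeakObjectRetainer above. A marked object stays on its weak list;
// an unmarked allocation-site-like object gets a single reprieve (it is
// flagged as a zombie and resurrected once so a following new-space traversal
// can still inspect it); everything else is dropped from the list by returning
// null. The SketchObject fields are invented stand-ins.
namespace weak_retainer_sketch {

struct SketchObject {
  bool is_marked = false;
  bool is_allocation_site = false;
  bool is_zombie = false;
};

SketchObject* RetainAs(SketchObject* object) {
  if (object->is_marked) return object;
  if (object->is_allocation_site && !object->is_zombie) {
    object->is_zombie = true;  // One-time reprieve.
    object->is_marked = true;  // Resurrect so it survives this cycle.
    return object;
  }
  return nullptr;  // Dropped: the weak list skips this element.
}

}  // namespace weak_retainer_sketch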
1507
1508
1509// Fill the marking stack with overflowed objects returned by the given
1510// iterator. Stop when the marking stack is filled or the end of the space
1511// is reached, whichever comes first.
1512template <class T>
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001513void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001514 // The caller should ensure that the marking stack is initially not full,
1515 // so that we don't waste effort pointlessly scanning for objects.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001516 DCHECK(!marking_deque()->IsFull());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001517
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001518 Map* filler_map = heap()->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001519 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1520 MarkBit markbit = Marking::MarkBitFrom(object);
1521 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1522 Marking::GreyToBlack(markbit);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001523 PushBlack(object);
1524 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001525 }
1526 }
1527}
1528
1529
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001530void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1531 DCHECK(!marking_deque()->IsFull());
1532 LiveObjectIterator<kGreyObjects> it(p);
1533 HeapObject* object = NULL;
1534 while ((object = it.Next()) != NULL) {
1535 MarkBit markbit = Marking::MarkBitFrom(object);
1536 DCHECK(Marking::IsGrey(markbit));
1537 Marking::GreyToBlack(markbit);
1538 PushBlack(object);
1539 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001540 }
1541}
1542
1543
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001544class MarkCompactCollector::HeapObjectVisitor {
1545 public:
1546 virtual ~HeapObjectVisitor() {}
1547 virtual bool Visit(HeapObject* object) = 0;
1548};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001549
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001550
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001551class MarkCompactCollector::EvacuateVisitorBase
1552 : public MarkCompactCollector::HeapObjectVisitor {
1553 public:
1554 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
1555 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001556
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001557 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1558 HeapObject** target_object) {
1559 int size = object->Size();
1560 AllocationAlignment alignment = object->RequiredAlignment();
1561 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1562 if (allocation.To(target_object)) {
1563 heap_->mark_compact_collector()->MigrateObject(
1564 *target_object, object, size, target_space->identity(),
1565 evacuation_slots_buffer_);
1566 return true;
1567 }
1568 return false;
1569 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001570
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001571 protected:
1572 Heap* heap_;
1573 SlotsBuffer** evacuation_slots_buffer_;
1574};
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001575
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001576
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001577class MarkCompactCollector::EvacuateNewSpaceVisitor final
1578 : public MarkCompactCollector::EvacuateVisitorBase {
1579 public:
1580 static const intptr_t kLabSize = 4 * KB;
1581 static const intptr_t kMaxLabObjectSize = 256;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001582
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001583 explicit EvacuateNewSpaceVisitor(Heap* heap,
1584 SlotsBuffer** evacuation_slots_buffer,
1585 HashMap* local_pretenuring_feedback)
1586 : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1587 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1588 space_to_allocate_(NEW_SPACE),
1589 promoted_size_(0),
1590 semispace_copied_size_(0),
1591 local_pretenuring_feedback_(local_pretenuring_feedback) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001592
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001593 bool Visit(HeapObject* object) override {
1594 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
1595 int size = object->Size();
1596 HeapObject* target_object = nullptr;
1597 if (heap_->ShouldBePromoted(object->address(), size) &&
1598 TryEvacuateObject(heap_->old_space(), object, &target_object)) {
1599 // If we end up needing more special cases, we should factor this out.
1600 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1601 heap_->array_buffer_tracker()->Promote(
1602 JSArrayBuffer::cast(target_object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001603 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001604 promoted_size_ += size;
1605 return true;
1606 }
1607 HeapObject* target = nullptr;
1608 AllocationSpace space = AllocateTargetObject(object, &target);
1609 heap_->mark_compact_collector()->MigrateObject(
1610 HeapObject::cast(target), object, size, space,
1611 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
1612 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1613 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1614 }
1615 semispace_copied_size_ += size;
1616 return true;
1617 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001618
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001619 intptr_t promoted_size() { return promoted_size_; }
1620 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1621
1622 private:
1623 enum NewSpaceAllocationMode {
1624 kNonstickyBailoutOldSpace,
1625 kStickyBailoutOldSpace,
1626 };
1627
1628 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1629 HeapObject** target_object) {
1630 const int size = old_object->Size();
1631 AllocationAlignment alignment = old_object->RequiredAlignment();
1632 AllocationResult allocation;
1633 if (space_to_allocate_ == NEW_SPACE) {
1634 if (size > kMaxLabObjectSize) {
1635 allocation =
1636 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1637 } else {
1638 allocation = AllocateInLab(size, alignment);
1639 }
1640 }
1641 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1642 allocation = AllocateInOldSpace(size, alignment);
1643 }
1644 bool ok = allocation.To(target_object);
1645 DCHECK(ok);
1646 USE(ok);
1647 return space_to_allocate_;
1648 }
1649
1650 inline bool NewLocalAllocationBuffer() {
1651 AllocationResult result =
1652 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1653 LocalAllocationBuffer saved_old_buffer = buffer_;
1654 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1655 if (buffer_.IsValid()) {
1656 buffer_.TryMerge(&saved_old_buffer);
1657 return true;
1658 }
1659 return false;
1660 }
1661
1662 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1663 AllocationAlignment alignment,
1664 NewSpaceAllocationMode mode) {
1665 AllocationResult allocation =
1666 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1667 if (allocation.IsRetry()) {
1668 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1669 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1670 } else {
1671 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1672 alignment);
1673 if (allocation.IsRetry()) {
1674 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001675 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001676 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001677 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001678 return allocation;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001679 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001680
1681 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1682 AllocationAlignment alignment) {
1683 AllocationResult allocation =
1684 heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
1685 if (allocation.IsRetry()) {
1686 FatalProcessOutOfMemory(
1687 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1688 }
1689 return allocation;
1690 }
1691
1692 inline AllocationResult AllocateInLab(int size_in_bytes,
1693 AllocationAlignment alignment) {
1694 AllocationResult allocation;
1695 if (!buffer_.IsValid()) {
1696 if (!NewLocalAllocationBuffer()) {
1697 space_to_allocate_ = OLD_SPACE;
1698 return AllocationResult::Retry(OLD_SPACE);
1699 }
1700 }
1701 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1702 if (allocation.IsRetry()) {
1703 if (!NewLocalAllocationBuffer()) {
1704 space_to_allocate_ = OLD_SPACE;
1705 return AllocationResult::Retry(OLD_SPACE);
1706 } else {
1707 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1708 if (allocation.IsRetry()) {
1709 space_to_allocate_ = OLD_SPACE;
1710 return AllocationResult::Retry(OLD_SPACE);
1711 }
1712 }
1713 }
1714 return allocation;
1715 }
1716
1717 LocalAllocationBuffer buffer_;
1718 AllocationSpace space_to_allocate_;
1719 intptr_t promoted_size_;
1720 intptr_t semispace_copied_size_;
1721 HashMap* local_pretenuring_feedback_;
1722};
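
// Illustrative sketch, not part of V8: it models the allocation strategy of
// EvacuateNewSpaceVisitor above. Objects small enough for a local allocation
// buffer (LAB) are bump-allocated from a per-visitor buffer that is refilled
// from new space on demand; once new space cannot hand out another buffer, the
// visitor "sticks" to old space for the rest of the evacuation. SketchLab,
// SketchNewSpace and the constants are invented stand-ins for
// LocalAllocationBuffer, NewSpace, kLabSize and kMaxLabObjectSize.
namespace lab_allocation_sketch {

const int kSketchLabSize = 4 * 1024;
const int kSketchMaxLabObjectSize = 256;

struct SketchLab {
  char* top = nullptr;
  char* limit = nullptr;
  bool IsValid() const { return top != nullptr; }
  void* Allocate(int size) {  // Bump-pointer allocation inside the buffer.
    if (top == nullptr || top + size > limit) return nullptr;
    void* result = top;
    top += size;
    return result;
  }
};

struct SketchNewSpace {
  char* top = nullptr;
  char* limit = nullptr;
  void* AllocateRaw(int size) {  // Null stands in for AllocationResult::Retry.
    if (top == nullptr || top + size > limit) return nullptr;
    void* result = top;
    top += size;
    return result;
  }
};

// Returns null when the caller should fall back to a direct new-space or
// old-space allocation: large objects bypass the LAB, and new-space exhaustion
// triggers the sticky old-space bailout (kStickyBailoutOldSpace above).
void* AllocateInLab(SketchNewSpace* new_space, SketchLab* lab,
                    bool* bail_out_to_old_space, int size) {
  if (*bail_out_to_old_space || size > kSketchMaxLabObjectSize) return nullptr;
  void* result = lab->IsValid() ? lab->Allocate(size) : nullptr;
  if (result == nullptr) {
    void* block = new_space->AllocateRaw(kSketchLabSize);
    if (block == nullptr) {
      *bail_out_to_old_space = true;  // New space is exhausted.
      return nullptr;
    }
    lab->top = static_cast<char*>(block);
    lab->limit = lab->top + kSketchLabSize;
    result = lab->Allocate(size);
  }
  return result;
}

}  // namespace lab_allocation_sketch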
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001723
1724
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001725class MarkCompactCollector::EvacuateOldSpaceVisitor final
1726 : public MarkCompactCollector::EvacuateVisitorBase {
1727 public:
1728 EvacuateOldSpaceVisitor(Heap* heap,
1729 CompactionSpaceCollection* compaction_spaces,
1730 SlotsBuffer** evacuation_slots_buffer)
1731 : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1732 compaction_spaces_(compaction_spaces) {}
1733
1734 bool Visit(HeapObject* object) override {
1735 CompactionSpace* target_space = compaction_spaces_->Get(
1736 Page::FromAddress(object->address())->owner()->identity());
1737 HeapObject* target_object = nullptr;
1738 if (TryEvacuateObject(target_space, object, &target_object)) {
1739 DCHECK(object->map_word().IsForwardingAddress());
1740 return true;
1741 }
1742 return false;
1743 }
1744
1745 private:
1746 CompactionSpaceCollection* compaction_spaces_;
1747};
1748
1749
1750void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001751 PageIterator it(space);
1752 while (it.has_next()) {
1753 Page* p = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001754 DiscoverGreyObjectsOnPage(p);
1755 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001756 }
1757}
1758
1759
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001760void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1761 NewSpace* space = heap()->new_space();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001762 NewSpacePageIterator it(space->bottom(), space->top());
1763 while (it.has_next()) {
1764 NewSpacePage* page = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001765 DiscoverGreyObjectsOnPage(page);
1766 if (marking_deque()->IsFull()) return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001767 }
1768}
1769
1770
1771bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1772 Object* o = *p;
1773 if (!o->IsHeapObject()) return false;
1774 HeapObject* heap_object = HeapObject::cast(o);
1775 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001776 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001777}
1778
1779
1780bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1781 Object** p) {
1782 Object* o = *p;
1783 DCHECK(o->IsHeapObject());
1784 HeapObject* heap_object = HeapObject::cast(o);
1785 MarkBit mark = Marking::MarkBitFrom(heap_object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001786 return Marking::IsWhite(mark);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001787}
1788
1789
1790void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1791 StringTable* string_table = heap()->string_table();
1792 // Mark the string table itself.
1793 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001794 if (Marking::IsWhite(string_table_mark)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001795 // String table could have already been marked by visiting the handles list.
1796 SetMark(string_table, string_table_mark);
1797 }
1798 // Explicitly mark the prefix.
1799 string_table->IteratePrefix(visitor);
1800 ProcessMarkingDeque();
1801}
1802
1803
1804void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1805 MarkBit mark_bit = Marking::MarkBitFrom(site);
1806 SetMark(site, mark_bit);
1807}
1808
1809
1810void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1811 // Mark the heap roots including global variables, stack variables,
1812 // etc., and all objects reachable from them.
1813 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1814
1815 // Handle the string table specially.
1816 MarkStringTable(visitor);
1817
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001818 // There may be overflowed objects in the heap. Visit them now.
1819 while (marking_deque_.overflowed()) {
1820 RefillMarkingDeque();
1821 EmptyMarkingDeque();
1822 }
1823}
1824
1825
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001826void MarkCompactCollector::MarkImplicitRefGroups(
1827 MarkObjectFunction mark_object) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001828 List<ImplicitRefGroup*>* ref_groups =
1829 isolate()->global_handles()->implicit_ref_groups();
1830
1831 int last = 0;
1832 for (int i = 0; i < ref_groups->length(); i++) {
1833 ImplicitRefGroup* entry = ref_groups->at(i);
1834 DCHECK(entry != NULL);
1835
1836 if (!IsMarked(*entry->parent)) {
1837 (*ref_groups)[last++] = entry;
1838 continue;
1839 }
1840
1841 Object*** children = entry->children;
1842 // A parent object is marked, so mark all child heap objects.
1843 for (size_t j = 0; j < entry->length; ++j) {
1844 if ((*children[j])->IsHeapObject()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001845 mark_object(heap(), HeapObject::cast(*children[j]));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001846 }
1847 }
1848
1849 // Once the entire group has been marked, dispose it because it's
1850 // not needed anymore.
1851 delete entry;
1852 }
1853 ref_groups->Rewind(last);
1854}
1855
1856
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001857// Mark all objects reachable from the objects on the marking stack.
1858// Before: the marking stack contains zero or more heap object pointers.
1859// After: the marking stack is empty, and all objects reachable from the
1860// marking stack have been marked, or are overflowed in the heap.
1861void MarkCompactCollector::EmptyMarkingDeque() {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001862 Map* filler_map = heap_->one_pointer_filler_map();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001863 while (!marking_deque_.IsEmpty()) {
1864 HeapObject* object = marking_deque_.Pop();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001865 // Explicitly skip one word fillers. Incremental markbit patterns are
1866 // correct only for objects that occupy at least two words.
1867 Map* map = object->map();
1868 if (map == filler_map) continue;
1869
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001870 DCHECK(object->IsHeapObject());
1871 DCHECK(heap()->Contains(object));
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001872 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001873
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001874 MarkBit map_mark = Marking::MarkBitFrom(map);
1875 MarkObject(map, map_mark);
1876
1877 MarkCompactMarkingVisitor::IterateBody(map, object);
1878 }
1879}
1880
1881
1882// Sweep the heap for overflowed objects, clear their overflow bits, and
1883// push them on the marking stack. Stop early if the marking stack fills
1884// before sweeping completes. If sweeping completes, there are no remaining
1885// overflowed objects in the heap, so the overflow flag on the marking stack
1886// is cleared.
1887void MarkCompactCollector::RefillMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001888 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001889 DCHECK(marking_deque_.overflowed());
1890
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001891 DiscoverGreyObjectsInNewSpace();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001892 if (marking_deque_.IsFull()) return;
1893
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001894 DiscoverGreyObjectsInSpace(heap()->old_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001895 if (marking_deque_.IsFull()) return;
1896
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001897 DiscoverGreyObjectsInSpace(heap()->code_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001898 if (marking_deque_.IsFull()) return;
1899
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001900 DiscoverGreyObjectsInSpace(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001901 if (marking_deque_.IsFull()) return;
1902
1903 LargeObjectIterator lo_it(heap()->lo_space());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001904 DiscoverGreyObjectsWithIterator(&lo_it);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001905 if (marking_deque_.IsFull()) return;
1906
1907 marking_deque_.ClearOverflowed();
1908}
1909
1910
1911// Mark all objects reachable (transitively) from objects on the marking
1912// stack. Before: the marking stack contains zero or more heap object
1913// pointers. After: the marking stack is empty and there are no overflowed
1914// objects in the heap.
1915void MarkCompactCollector::ProcessMarkingDeque() {
1916 EmptyMarkingDeque();
1917 while (marking_deque_.overflowed()) {
1918 RefillMarkingDeque();
1919 EmptyMarkingDeque();
1920 }
1921}
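
// Illustrative sketch, not part of V8: it models the overflow protocol
// documented above RefillMarkingDeque and ProcessMarkingDeque. The marking
// deque has a fixed capacity; a push that does not fit only sets an
// "overflowed" flag and leaves the object grey in the heap, and the fixpoint
// loop alternates draining the deque with rescanning the heap for the grey
// objects that were left behind. The Sketch* types are invented, the deque is
// deliberately tiny, and the color handling is simplified relative to V8's
// mark-bitmap encoding.
namespace marking_overflow_sketch {

enum SketchColor { kSketchWhite, kSketchGrey, kSketchBlack };

struct SketchObject {
  SketchColor color = kSketchWhite;
  SketchObject* fields[2] = {nullptr, nullptr};
};

struct SketchDeque {
  static const int kCapacity = 4;  // Tiny on purpose, to force overflows.
  SketchObject* entries[kCapacity];
  int size = 0;
  bool overflowed = false;
  bool IsEmpty() const { return size == 0; }
  bool IsFull() const { return size == kCapacity; }
  void Push(SketchObject* object) {
    if (IsFull()) {
      overflowed = true;  // The object stays grey in the heap.
    } else {
      entries[size++] = object;
    }
  }
  SketchObject* Pop() { return entries[--size]; }
};

// Pop grey objects, blacken them, and grey their white children.
void EmptyDeque(SketchDeque* deque) {
  while (!deque->IsEmpty()) {
    SketchObject* object = deque->Pop();
    object->color = kSketchBlack;
    for (SketchObject* field : object->fields) {
      if (field != nullptr && field->color == kSketchWhite) {
        field->color = kSketchGrey;
        deque->Push(field);  // May overflow; the child then stays grey.
      }
    }
  }
}

// Rescan the whole "heap" for grey objects left behind by an overflow; the
// flag is only cleared if the scan finishes with room to spare.
void RefillDeque(SketchDeque* deque, SketchObject* heap, int heap_size) {
  for (int i = 0; i < heap_size; i++) {
    if (heap[i].color == kSketchGrey) {
      deque->Push(&heap[i]);
      if (deque->IsFull()) return;  // Overflowed stays set; come back later.
    }
  }
  deque->overflowed = false;
}

// The fixpoint loop corresponding to ProcessMarkingDeque above.
void ProcessDeque(SketchDeque* deque, SketchObject* heap, int heap_size) {
  EmptyDeque(deque);
  while (deque->overflowed) {
    RefillDeque(deque, heap, heap_size);
    EmptyDeque(deque);
  }
}

}  // namespace marking_overflow_sketch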
1922
1923
1924// Mark all objects reachable (transitively) from objects on the marking
1925// stack including references only considered in the atomic marking pause.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001926void MarkCompactCollector::ProcessEphemeralMarking(
1927 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001928 bool work_to_do = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001929 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001930 while (work_to_do) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001931 if (!only_process_harmony_weak_collections) {
1932 isolate()->global_handles()->IterateObjectGroups(
1933 visitor, &IsUnmarkedHeapObjectWithHeap);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001934 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001935 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001936 ProcessWeakCollections();
1937 work_to_do = !marking_deque_.IsEmpty();
1938 ProcessMarkingDeque();
1939 }
1940}
1941
1942
1943void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1944 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1945 !it.done(); it.Advance()) {
1946 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1947 return;
1948 }
1949 if (it.frame()->type() == StackFrame::OPTIMIZED) {
1950 Code* code = it.frame()->LookupCode();
1951 if (!code->CanDeoptAt(it.frame()->pc())) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001952 Code::BodyDescriptor::IterateBody(code, visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001953 }
1954 ProcessMarkingDeque();
1955 return;
1956 }
1957 }
1958}
1959
1960
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001961void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
1962 DCHECK(!marking_deque_.in_use());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001963 if (marking_deque_memory_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001964 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
1965 marking_deque_memory_committed_ = 0;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001966 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001967 if (marking_deque_memory_ == NULL) {
1968 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
1969 }
1970}
1971
1972
1973void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
1974 // If the marking deque is too small, we try to allocate a bigger one.
1975 // If that fails, make do with a smaller one.
1976 CHECK(!marking_deque_.in_use());
1977 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
1978 base::VirtualMemory* memory = marking_deque_memory_;
1979 size_t currently_committed = marking_deque_memory_committed_;
1980
1981 if (currently_committed == size) return;
1982
1983 if (currently_committed > size) {
1984 bool success = marking_deque_memory_->Uncommit(
1985 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
1986 currently_committed - size);
1987 if (success) {
1988 marking_deque_memory_committed_ = size;
1989 return;
1990 }
1991 UNREACHABLE();
1992 }
1993
1994 bool success = memory->Commit(
1995 reinterpret_cast<Address>(memory->address()) + currently_committed,
1996 size - currently_committed,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001997 false); // Not executable.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001998 if (success) {
1999 marking_deque_memory_committed_ = size;
2000 return;
2001 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002002 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002003 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002004}
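
// Illustrative sketch, not part of V8: it shows the growth fallback used by
// EnsureMarkingDequeIsCommitted above, asking for the largest backing store
// first and halving the request until a commit succeeds, giving up only below
// a minimum size. The shrink (uncommit) and already-committed cases handled by
// the real code are omitted. TryCommit and the constants are invented
// stand-ins for base::VirtualMemory::Commit and the deque size limits.
namespace deque_commit_sketch {

const unsigned kSketchMaxDequeSize = 4u * 1024u * 1024u;
const unsigned kSketchMinDequeSize = 256u * 1024u;
const unsigned kSketchAvailable = 1024u * 1024u;

// Hypothetical commit primitive: here it simply pretends that at most
// kSketchAvailable bytes can be committed.
bool TryCommit(unsigned size) { return size <= kSketchAvailable; }

// Returns the committed size, or 0 if even the minimum could not be committed
// (the real code treats that as a fatal out-of-memory condition). With the
// numbers above, CommitMarkingDeque(kSketchMaxDequeSize) returns 1 MB.
unsigned CommitMarkingDeque(unsigned requested_size) {
  for (unsigned size = requested_size; size >= kSketchMinDequeSize;
       size >>= 1) {
    if (TryCommit(size)) return size;
  }
  return 0;
}

}  // namespace deque_commit_sketch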
2005
2006
2007void MarkCompactCollector::InitializeMarkingDeque() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002008 DCHECK(!marking_deque_.in_use());
2009 DCHECK(marking_deque_memory_committed_ > 0);
2010 Address addr = static_cast<Address>(marking_deque_memory_->address());
2011 size_t size = marking_deque_memory_committed_;
2012 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2013 marking_deque_.Initialize(addr, addr + size);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002014}
2015
2016
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002017void MarkingDeque::Initialize(Address low, Address high) {
2018 DCHECK(!in_use_);
2019 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2020 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2021 array_ = obj_low;
2022 mask_ = base::bits::RoundDownToPowerOfTwo32(
2023 static_cast<uint32_t>(obj_high - obj_low)) -
2024 1;
2025 top_ = bottom_ = 0;
2026 overflowed_ = false;
2027 in_use_ = true;
2028}
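
// Illustrative sketch, not part of V8: MarkingDeque::Initialize above rounds
// the usable capacity down to a power of two so that indices can be wrapped
// with a cheap bitwise AND instead of a modulo. The sketch shows that indexing
// scheme on a plain int buffer; it is written as a FIFO ring for clarity,
// whereas V8's deque also pushes and pops at the top end.
// RoundDownToPowerOfTwo mirrors base::bits::RoundDownToPowerOfTwo32.
namespace ring_buffer_sketch {

unsigned RoundDownToPowerOfTwo(unsigned value) {
  unsigned result = 1;
  while (result <= value / 2) result *= 2;
  return result;
}

struct SketchDeque {
  int* array = nullptr;
  unsigned mask = 0;
  unsigned top = 0;     // Next slot to write.
  unsigned bottom = 0;  // Oldest element.

  // `slots` must be at least 2; one slot is left unused so a full ring can be
  // told apart from an empty one. E.g. slots = 12 gives mask = 7, so indices
  // wrap within an 8-slot window.
  void Initialize(int* storage, unsigned slots) {
    array = storage;
    mask = RoundDownToPowerOfTwo(slots) - 1;
    top = bottom = 0;
  }
  bool IsEmpty() const { return top == bottom; }
  bool IsFull() const { return ((top + 1) & mask) == bottom; }
  void Push(int value) {  // Caller checks IsFull() first.
    array[top] = value;
    top = (top + 1) & mask;  // Wrap-around without a division.
  }
  int Pop() {  // Caller checks IsEmpty() first.
    int value = array[bottom];
    bottom = (bottom + 1) & mask;
    return value;
  }
};

}  // namespace ring_buffer_sketch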
2029
2030
2031void MarkingDeque::Uninitialize(bool aborting) {
2032 if (!aborting) {
2033 DCHECK(IsEmpty());
2034 DCHECK(!overflowed_);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002035 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002036 DCHECK(in_use_);
2037 top_ = bottom_ = 0xdecbad;
2038 in_use_ = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002039}
2040
2041
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002042void MarkCompactCollector::MarkLiveObjects() {
2043 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2044 double start_time = 0.0;
2045 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002046 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002047 }
2048 // The recursive GC marker detects when it is nearing stack overflow,
2049 // and switches to a different marking system. JS interrupts interfere
2050 // with the C stack limit check.
2051 PostponeInterruptsScope postpone(isolate());
2052
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002053 {
2054 GCTracer::Scope gc_scope(heap()->tracer(),
2055 GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2056 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2057 if (was_marked_incrementally_) {
2058 incremental_marking->Finalize();
2059 } else {
2060 // Abort any pending incremental activities e.g. incremental sweeping.
2061 incremental_marking->Stop();
2062 if (marking_deque_.in_use()) {
2063 marking_deque_.Uninitialize(true);
2064 }
2065 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002066 }
2067
2068#ifdef DEBUG
2069 DCHECK(state_ == PREPARE_GC);
2070 state_ = MARK_LIVE_OBJECTS;
2071#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002072
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002073 EnsureMarkingDequeIsCommittedAndInitialize(
2074 MarkCompactCollector::kMaxMarkingDequeSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002075
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002076 {
2077 GCTracer::Scope gc_scope(heap()->tracer(),
2078 GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2079 PrepareForCodeFlushing();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002080 }
2081
2082 RootMarkingVisitor root_visitor(heap());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002083
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002084 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002085 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2086 MarkRoots(&root_visitor);
2087 ProcessTopOptimizedFrame(&root_visitor);
2088 }
2089
2090 {
2091 GCTracer::Scope gc_scope(heap()->tracer(),
2092 GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002093
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002094 // The objects reachable from the roots are marked, but unreachable
2095 // objects are still unmarked. Mark objects that are reachable through
2096 // host-application-specific logic (object groups) or Harmony weak maps.
2097 ProcessEphemeralMarking(&root_visitor, false);
2098
2099 // The objects reachable from the roots, weak maps or object groups
2100 // are marked. Objects pointed to only by weak global handles cannot be
2101 // immediately reclaimed. Instead, we have to mark them as pending and mark
2102 // objects reachable from them.
2103 //
2104 // First we identify nonlive weak handles and mark them as pending
2105 // destruction.
2106 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2107 &IsUnmarkedHeapObject);
2108 // Then we mark the objects.
2109 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2110 ProcessMarkingDeque();
2111
2112 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2113 // the weak roots we just marked as pending destruction.
2114 //
2115 // We only process Harmony collections, as all object groups have been fully
2116 // processed and no weakly reachable node can discover new object groups.
2117 ProcessEphemeralMarking(&root_visitor, true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002118 }
2119
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002120 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002121 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2122 start_time);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002123 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002124 if (FLAG_track_gc_object_stats) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002125 if (FLAG_trace_gc_object_stats) {
2126 heap()->object_stats_->TraceObjectStats();
2127 }
2128 heap()->object_stats_->CheckpointObjectStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002129 }
2130}
2131
2132
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002133void MarkCompactCollector::ClearNonLiveReferences() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002134 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002135
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002136 {
2137 GCTracer::Scope gc_scope(heap()->tracer(),
2138 GCTracer::Scope::MC_CLEAR_STRING_TABLE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002139
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002140 // Prune the string table removing all strings only pointed to by the
2141 // string table. Cannot use string_table() here because the string
2142 // table is marked.
2143 StringTable* string_table = heap()->string_table();
2144 InternalizedStringTableCleaner internalized_visitor(heap());
2145 string_table->IterateElements(&internalized_visitor);
2146 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002147
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002148 ExternalStringTableCleaner external_visitor(heap());
2149 heap()->external_string_table_.Iterate(&external_visitor);
2150 heap()->external_string_table_.CleanUp();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002151 }
2152
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002153 {
2154 GCTracer::Scope gc_scope(heap()->tracer(),
2155 GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2156 // Process the weak references.
2157 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2158 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002159 }
2160
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002161 {
2162 GCTracer::Scope gc_scope(heap()->tracer(),
2163 GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2164
2165 // Remove object groups after marking phase.
2166 heap()->isolate()->global_handles()->RemoveObjectGroups();
2167 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002168 }
2169
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002170 // Flush code from collected candidates.
2171 if (is_code_flushing_enabled()) {
2172 GCTracer::Scope gc_scope(heap()->tracer(),
2173 GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2174 code_flusher_->ProcessCandidates();
2175 }
2176
2177
2178 DependentCode* dependent_code_list;
2179 Object* non_live_map_list;
2180 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2181
2182 {
2183 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2184 ClearSimpleMapTransitions(non_live_map_list);
2185 ClearFullMapTransitions();
2186 }
2187
2188 MarkDependentCodeForDeoptimization(dependent_code_list);
2189
2190 ClearWeakCollections();
2191
2192 ClearInvalidStoreAndSlotsBufferEntries();
2193}
2194
2195
2196void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2197 DependentCode* list_head) {
2198 GCTracer::Scope gc_scope(heap()->tracer(),
2199 GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2200 Isolate* isolate = this->isolate();
2201 DependentCode* current = list_head;
2202 while (current->length() > 0) {
2203 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2204 isolate, DependentCode::kWeakCodeGroup);
2205 current = current->next_link();
2206 }
2207
2208 WeakHashTable* table = heap_->weak_object_to_code_table();
2209 uint32_t capacity = table->Capacity();
2210 for (uint32_t i = 0; i < capacity; i++) {
2211 uint32_t key_index = table->EntryToIndex(i);
2212 Object* key = table->get(key_index);
2213 if (!table->IsKey(key)) continue;
2214 uint32_t value_index = table->EntryToValueIndex(i);
2215 Object* value = table->get(value_index);
2216 DCHECK(key->IsWeakCell());
2217 if (WeakCell::cast(key)->cleared()) {
2218 have_code_to_deoptimize_ |=
2219 DependentCode::cast(value)->MarkCodeForDeoptimization(
2220 isolate, DependentCode::kWeakCodeGroup);
2221 table->set(key_index, heap_->the_hole_value());
2222 table->set(value_index, heap_->the_hole_value());
2223 table->ElementRemoved();
2224 }
2225 }
2226}
2227
2228
2229void MarkCompactCollector::ClearSimpleMapTransitions(
2230 Object* non_live_map_list) {
2231 Object* the_hole_value = heap()->the_hole_value();
2232 Object* weak_cell_obj = non_live_map_list;
2233 while (weak_cell_obj != Smi::FromInt(0)) {
2234 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2235 Map* map = Map::cast(weak_cell->value());
2236 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2237 Object* potential_parent = map->constructor_or_backpointer();
2238 if (potential_parent->IsMap()) {
2239 Map* parent = Map::cast(potential_parent);
2240 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2241 parent->raw_transitions() == weak_cell) {
2242 ClearSimpleMapTransition(parent, map);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002243 }
2244 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002245 weak_cell->clear();
2246 weak_cell_obj = weak_cell->next();
2247 weak_cell->clear_next(the_hole_value);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002248 }
2249}
2250
2251
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002252void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2253 Map* dead_transition) {
2254 // A previously existing simple transition (stored in a WeakCell) is going
2255 // to be cleared. Clear the useless cell pointer, and take ownership
2256 // of the descriptor array.
2257 map->set_raw_transitions(Smi::FromInt(0));
2258 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002259 DescriptorArray* descriptors = map->instance_descriptors();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002260 if (descriptors == dead_transition->instance_descriptors() &&
2261 number_of_own_descriptors > 0) {
2262 TrimDescriptorArray(map, descriptors);
2263 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2264 map->set_owns_descriptors(true);
2265 }
2266}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002267
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002268
2269void MarkCompactCollector::ClearFullMapTransitions() {
2270 HeapObject* undefined = heap()->undefined_value();
2271 Object* obj = heap()->encountered_transition_arrays();
2272 while (obj != Smi::FromInt(0)) {
2273 TransitionArray* array = TransitionArray::cast(obj);
2274 int num_transitions = array->number_of_entries();
2275 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2276 if (num_transitions > 0) {
2277 Map* map = array->GetTarget(0);
2278 Map* parent = Map::cast(map->constructor_or_backpointer());
2279 bool parent_is_alive =
2280 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2281 DescriptorArray* descriptors =
2282 parent_is_alive ? parent->instance_descriptors() : nullptr;
2283 bool descriptors_owner_died =
2284 CompactTransitionArray(parent, array, descriptors);
2285 if (descriptors_owner_died) {
2286 TrimDescriptorArray(parent, descriptors);
2287 }
2288 }
2289 obj = array->next_link();
2290 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2291 }
2292 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2293}
2294
2295
2296bool MarkCompactCollector::CompactTransitionArray(
2297 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2298 int num_transitions = transitions->number_of_entries();
2299 bool descriptors_owner_died = false;
2300 int transition_index = 0;
2301 // Compact all live transitions to the left.
2302 for (int i = 0; i < num_transitions; ++i) {
2303 Map* target = transitions->GetTarget(i);
2304 DCHECK_EQ(target->constructor_or_backpointer(), map);
2305 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2306 if (descriptors != nullptr &&
2307 target->instance_descriptors() == descriptors) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002308 descriptors_owner_died = true;
2309 }
2310 } else {
2311 if (i != transition_index) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002312 Name* key = transitions->GetKey(i);
2313 transitions->SetKey(transition_index, key);
2314 Object** key_slot = transitions->GetKeySlot(transition_index);
2315 RecordSlot(transitions, key_slot, key);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002316 // Target slots do not need to be recorded since maps are not compacted.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002317 transitions->SetTarget(transition_index, transitions->GetTarget(i));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002318 }
2319 transition_index++;
2320 }
2321 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002322 // If there are no transitions to be cleared, return.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002323 if (transition_index == num_transitions) {
2324 DCHECK(!descriptors_owner_died);
2325 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002326 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002327 // Note that we never eliminate a transition array, though we might right-trim
2328 // such that number_of_transitions() == 0. If this assumption changes,
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002329 // TransitionArray::Insert() will need to deal with the case that a transition
2330 // array disappeared during GC.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002331 int trim = TransitionArray::Capacity(transitions) - transition_index;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002332 if (trim > 0) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002333 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2334 transitions, trim * TransitionArray::kTransitionSize);
2335 transitions->SetNumberOfTransitions(transition_index);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002337 return descriptors_owner_died;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002338}
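
// Illustrative sketch, not part of V8: CompactTransitionArray above keeps a
// map's transition array alive but slides the surviving (marked) entries to
// the left and right-trims the dead tail rather than allocating a new array.
// The sketch shows that in-place compaction on a simple array of entries;
// SketchTransition and is_target_live are invented stand-ins for the Name/Map
// pairs and the mark-bit check.
namespace transition_compaction_sketch {

struct SketchTransition {
  int key = 0;
  bool is_target_live = false;
};

// Compacts live transitions to the front and returns the new length; the
// caller is expected to "right-trim" the array to that length afterwards.
int CompactLiveTransitions(SketchTransition* transitions, int length) {
  int live_index = 0;
  for (int i = 0; i < length; i++) {
    if (transitions[i].is_target_live) {
      if (i != live_index) transitions[live_index] = transitions[i];
      live_index++;
    }
  }
  return live_index;
}

}  // namespace transition_compaction_sketch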
2339
2340
2341void MarkCompactCollector::TrimDescriptorArray(Map* map,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002342 DescriptorArray* descriptors) {
2343 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2344 if (number_of_own_descriptors == 0) {
2345 DCHECK(descriptors == heap_->empty_descriptor_array());
2346 return;
2347 }
2348
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002349 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2350 int to_trim = number_of_descriptors - number_of_own_descriptors;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002351 if (to_trim > 0) {
2352 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2353 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2354 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002355
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002356 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2357 descriptors->Sort();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002358
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002359 if (FLAG_unbox_double_fields) {
2360 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2361 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2362 number_of_own_descriptors);
2363 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2364 }
2365 }
2366 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2367 map->set_owns_descriptors(true);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002368}
2369
2370
2371void MarkCompactCollector::TrimEnumCache(Map* map,
2372 DescriptorArray* descriptors) {
2373 int live_enum = map->EnumLength();
2374 if (live_enum == kInvalidEnumCacheSentinel) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002375 live_enum =
2376 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002377 }
2378 if (live_enum == 0) return descriptors->ClearEnumCache();
2379
2380 FixedArray* enum_cache = descriptors->GetEnumCache();
2381
2382 int to_trim = enum_cache->length() - live_enum;
2383 if (to_trim <= 0) return;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002384 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2385 descriptors->GetEnumCache(), to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002386
2387 if (!descriptors->HasEnumIndicesCache()) return;
2388 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002389 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2390 to_trim);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002391}
2392
2393
2394void MarkCompactCollector::ProcessWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002395 Object* weak_collection_obj = heap()->encountered_weak_collections();
2396 while (weak_collection_obj != Smi::FromInt(0)) {
2397 JSWeakCollection* weak_collection =
2398 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2399 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2400 if (weak_collection->table()->IsHashTable()) {
2401 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002402 for (int i = 0; i < table->Capacity(); i++) {
2403 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2404 Object** key_slot =
2405 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002406 RecordSlot(table, key_slot, *key_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002407 Object** value_slot =
2408 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002409 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002410 value_slot);
2411 }
2412 }
2413 }
2414 weak_collection_obj = weak_collection->next();
2415 }
2416}
2417
2418
2419void MarkCompactCollector::ClearWeakCollections() {
2420 GCTracer::Scope gc_scope(heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002421 GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002422 Object* weak_collection_obj = heap()->encountered_weak_collections();
2423 while (weak_collection_obj != Smi::FromInt(0)) {
2424 JSWeakCollection* weak_collection =
2425 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2426 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2427 if (weak_collection->table()->IsHashTable()) {
2428 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2429 for (int i = 0; i < table->Capacity(); i++) {
2430 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2431 if (!MarkCompactCollector::IsMarked(key)) {
2432 table->RemoveEntry(i);
2433 }
2434 }
2435 }
2436 weak_collection_obj = weak_collection->next();
2437 weak_collection->set_next(heap()->undefined_value());
2438 }
2439 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2440}
2441
2442
2443void MarkCompactCollector::AbortWeakCollections() {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002444 Object* weak_collection_obj = heap()->encountered_weak_collections();
2445 while (weak_collection_obj != Smi::FromInt(0)) {
2446 JSWeakCollection* weak_collection =
2447 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2448 weak_collection_obj = weak_collection->next();
2449 weak_collection->set_next(heap()->undefined_value());
2450 }
2451 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2452}
2453
2454
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002455void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2456 DependentCode** dependent_code_list) {
2457 Heap* heap = this->heap();
2458 GCTracer::Scope gc_scope(heap->tracer(),
2459 GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2460 Object* weak_cell_obj = heap->encountered_weak_cells();
2461 Object* the_hole_value = heap->the_hole_value();
2462 DependentCode* dependent_code_head =
2463 DependentCode::cast(heap->empty_fixed_array());
2464 Object* non_live_map_head = Smi::FromInt(0);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002465 while (weak_cell_obj != Smi::FromInt(0)) {
2466 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002467 Object* next_weak_cell = weak_cell->next();
2468 bool clear_value = true;
2469 bool clear_next = true;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002470 // We do not insert cleared weak cells into the list, so the value
2471 // cannot be a Smi here.
2472 HeapObject* value = HeapObject::cast(weak_cell->value());
2473 if (!MarkCompactCollector::IsMarked(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002474 // Cells for new-space objects embedded in optimized code are wrapped in
2475 // WeakCell and put into Heap::weak_object_to_code_table.
2476 // Such cells do not have any strong references but we want to keep them
2477 // alive as long as the cell value is alive.
2478 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2479 if (value->IsCell()) {
2480 Object* cell_value = Cell::cast(value)->value();
2481 if (cell_value->IsHeapObject() &&
2482 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2483 // Resurrect the cell.
2484 MarkBit mark = Marking::MarkBitFrom(value);
2485 SetMark(value, mark);
2486 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2487 RecordSlot(value, slot, *slot);
2488 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2489 RecordSlot(weak_cell, slot, *slot);
2490 clear_value = false;
2491 }
2492 }
2493 if (value->IsMap()) {
2494 // The map is non-live.
2495 Map* map = Map::cast(value);
2496 // Add dependent code to the dependent_code_list.
2497 DependentCode* candidate = map->dependent_code();
2498 // We rely on the fact that the weak code group comes first.
2499 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2500 if (candidate->length() > 0 &&
2501 candidate->group() == DependentCode::kWeakCodeGroup) {
2502 candidate->set_next_link(dependent_code_head);
2503 dependent_code_head = candidate;
2504 }
2505 // Add the weak cell to the non_live_map list.
2506 weak_cell->set_next(non_live_map_head);
2507 non_live_map_head = weak_cell;
2508 clear_value = false;
2509 clear_next = false;
2510 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002511 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002512 // The value of the weak cell is alive.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002513 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002514 RecordSlot(weak_cell, slot, *slot);
2515 clear_value = false;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002516 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002517 if (clear_value) {
2518 weak_cell->clear();
2519 }
2520 if (clear_next) {
2521 weak_cell->clear_next(the_hole_value);
2522 }
2523 weak_cell_obj = next_weak_cell;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002524 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002525 heap->set_encountered_weak_cells(Smi::FromInt(0));
2526 *non_live_map_list = non_live_map_head;
2527 *dependent_code_list = dependent_code_head;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002528}
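
// Illustrative sketch, not part of V8: it shows the basic shape of
// ClearWeakCells above, walking the singly linked list of weak cells that
// marking collected, clearing every cell whose value did not survive marking,
// and keeping cells whose values are live. The special cases for Cells
// wrapping live values and for dead Maps (which are deferred to separate
// lists) are deliberately omitted. The Sketch* types are invented.
namespace weak_cell_sketch {

struct SketchValue {
  bool is_marked = false;
};

struct SketchWeakCell {
  SketchValue* value = nullptr;
  SketchWeakCell* next = nullptr;
};

// Returns the number of cells that were cleared.
int ClearDeadWeakCells(SketchWeakCell* head) {
  int cleared = 0;
  for (SketchWeakCell* cell = head; cell != nullptr;) {
    SketchWeakCell* next = cell->next;
    if (cell->value != nullptr && !cell->value->is_marked) {
      cell->value = nullptr;  // The referent is dead: clear the weak cell.
      cleared++;
    }
    cell->next = nullptr;  // The list is only valid during this GC cycle.
    cell = next;
  }
  return cleared;
}

}  // namespace weak_cell_sketch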
2529
2530
2531void MarkCompactCollector::AbortWeakCells() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002532 Object* the_hole_value = heap()->the_hole_value();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002533 Object* weak_cell_obj = heap()->encountered_weak_cells();
2534 while (weak_cell_obj != Smi::FromInt(0)) {
2535 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2536 weak_cell_obj = weak_cell->next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002537 weak_cell->clear_next(the_hole_value);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002538 }
2539 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2540}
2541
2542
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002543void MarkCompactCollector::AbortTransitionArrays() {
2544 HeapObject* undefined = heap()->undefined_value();
2545 Object* obj = heap()->encountered_transition_arrays();
2546 while (obj != Smi::FromInt(0)) {
2547 TransitionArray* array = TransitionArray::cast(obj);
2548 obj = array->next_link();
2549 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2550 }
2551 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2552}
2553
2554
2555void MarkCompactCollector::RecordMigratedSlot(
2556 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
2557 // When parallel compaction is in progress, store and slots buffer entries
2558 // require synchronization.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002559 if (heap_->InNewSpace(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002560 if (compaction_in_progress_) {
2561 heap_->store_buffer()->MarkSynchronized(slot);
2562 } else {
2563 heap_->store_buffer()->Mark(slot);
2564 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002565 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002566 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002567 reinterpret_cast<Object**>(slot),
2568 SlotsBuffer::IGNORE_OVERFLOW);
2569 }
2570}
2571
2572
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002573void MarkCompactCollector::RecordMigratedCodeEntrySlot(
2574 Address code_entry, Address code_entry_slot,
2575 SlotsBuffer** evacuation_slots_buffer) {
2576 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2577 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2578 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2579 SlotsBuffer::IGNORE_OVERFLOW);
2580 }
2581}
2582
2583
2584void MarkCompactCollector::RecordMigratedCodeObjectSlot(
2585 Address code_object, SlotsBuffer** evacuation_slots_buffer) {
2586 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2587 SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
2588 SlotsBuffer::IGNORE_OVERFLOW);
2589}
2590
2591
2592static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
2593 if (RelocInfo::IsCodeTarget(rmode)) {
2594 return SlotsBuffer::CODE_TARGET_SLOT;
2595 } else if (RelocInfo::IsCell(rmode)) {
2596 return SlotsBuffer::CELL_TARGET_SLOT;
2597 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
2598 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
2599 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
2600 return SlotsBuffer::DEBUG_TARGET_SLOT;
2601 }
2602 UNREACHABLE();
2603 return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
2604}
2605
2606
2607static inline SlotsBuffer::SlotType DecodeSlotType(
2608 SlotsBuffer::ObjectSlot slot) {
2609 return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
2610}
2611
2612
2613void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
2614 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2615 RelocInfo::Mode rmode = rinfo->rmode();
2616 if (target_page->IsEvacuationCandidate() &&
2617 (rinfo->host() == NULL ||
2618 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2619 Address addr = rinfo->pc();
2620 SlotsBuffer::SlotType slot_type = SlotTypeForRMode(rmode);
2621 if (rinfo->IsInConstantPool()) {
2622 addr = rinfo->constant_pool_entry_address();
2623 if (RelocInfo::IsCodeTarget(rmode)) {
2624 slot_type = SlotsBuffer::CODE_ENTRY_SLOT;
2625 } else {
2626 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2627 slot_type = SlotsBuffer::OBJECT_SLOT;
2628 }
2629 }
2630 bool success = SlotsBuffer::AddTo(
2631 slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type,
2632 addr, SlotsBuffer::FAIL_ON_OVERFLOW);
2633 if (!success) {
2634 EvictPopularEvacuationCandidate(target_page);
2635 }
2636 }
2637}
2638
2639
2640class RecordMigratedSlotVisitor final : public ObjectVisitor {
2641 public:
2642 RecordMigratedSlotVisitor(MarkCompactCollector* collector,
2643 SlotsBuffer** evacuation_slots_buffer)
2644 : collector_(collector),
2645 evacuation_slots_buffer_(evacuation_slots_buffer) {}
2646
2647 V8_INLINE void VisitPointer(Object** p) override {
2648 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
2649 evacuation_slots_buffer_);
2650 }
2651
2652 V8_INLINE void VisitPointers(Object** start, Object** end) override {
2653 while (start < end) {
2654 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
2655 evacuation_slots_buffer_);
2656 ++start;
2657 }
2658 }
2659
2660 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2661 if (collector_->compacting_) {
2662 Address code_entry = Memory::Address_at(code_entry_slot);
2663 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2664 evacuation_slots_buffer_);
2665 }
2666 }
2667
2668 private:
2669 MarkCompactCollector* collector_;
2670 SlotsBuffer** evacuation_slots_buffer_;
2671};
2672
2673
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002674// We scavenge new space simultaneously with sweeping. This is done in two
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002675// passes.
2676//
2677// The first pass migrates all alive objects from one semispace to another or
2678// promotes them to old space. The forwarding address is written directly
2679// into the first word of the object without any encoding. If the object is
2680// dead, we write NULL as the forwarding address.
2681//
2682// The second pass updates pointers to new space in all spaces. It is possible
2683// to encounter pointers to dead new space objects during traversal of pointers
2684// to new space. We should clear them to avoid encountering them during the
2685// next pointer iteration. This is an issue if the store buffer overflows and we
2686// have to scan the entire old space, including dead objects, looking for
2687// pointers to new space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002688void MarkCompactCollector::MigrateObject(
2689 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
2690 SlotsBuffer** evacuation_slots_buffer) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002691 Address dst_addr = dst->address();
2692 Address src_addr = src->address();
2693 DCHECK(heap()->AllowedToBeMigrated(src, dest));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002694 DCHECK(dest != LO_SPACE);
2695 if (dest == OLD_SPACE) {
2696 DCHECK_OBJECT_SIZE(size);
2697 DCHECK(evacuation_slots_buffer != nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002698 DCHECK(IsAligned(size, kPointerSize));
2699
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002700 heap()->MoveBlock(dst->address(), src->address(), size);
2701 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
2702 dst->IterateBody(&visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002703 } else if (dest == CODE_SPACE) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002704 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2705 DCHECK(evacuation_slots_buffer != nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002706 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2707 heap()->MoveBlock(dst_addr, src_addr, size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002708 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002709 Code::cast(dst)->Relocate(dst_addr - src_addr);
2710 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002711 DCHECK_OBJECT_SIZE(size);
2712 DCHECK(evacuation_slots_buffer == nullptr);
2713 DCHECK(dest == NEW_SPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002714 heap()->MoveBlock(dst_addr, src_addr, size);
2715 }
2716 heap()->OnMoveEvent(dst, src, size);
2717 Memory::Address_at(src_addr) = dst_addr;
2718}
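// The final write to Memory::Address_at(src_addr) above implements the
// forwarding protocol used throughout evacuation: the first word of the old
// copy is overwritten with the address of the new copy, so a subsequent
// map_word() read on the stale object reports IsForwardingAddress() and
// ToForwardingAddress() yields the new location (see
// PointersUpdatingVisitor::UpdateSlot() below).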
2719
2720
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002721static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
2722 SlotsBuffer::SlotType slot_type, Address addr) {
2723 switch (slot_type) {
2724 case SlotsBuffer::CODE_TARGET_SLOT: {
2725 RelocInfo rinfo(isolate, addr, RelocInfo::CODE_TARGET, 0, NULL);
2726 rinfo.Visit(isolate, v);
2727 break;
2728 }
2729 case SlotsBuffer::CELL_TARGET_SLOT: {
2730 RelocInfo rinfo(isolate, addr, RelocInfo::CELL, 0, NULL);
2731 rinfo.Visit(isolate, v);
2732 break;
2733 }
2734 case SlotsBuffer::CODE_ENTRY_SLOT: {
2735 v->VisitCodeEntry(addr);
2736 break;
2737 }
2738 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
2739 HeapObject* obj = HeapObject::FromAddress(addr);
2740 Code::BodyDescriptor::IterateBody(obj, v);
2741 break;
2742 }
2743 case SlotsBuffer::DEBUG_TARGET_SLOT: {
2744 RelocInfo rinfo(isolate, addr, RelocInfo::DEBUG_BREAK_SLOT_AT_POSITION, 0,
2745 NULL);
2746 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
2747 break;
2748 }
2749 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
2750 RelocInfo rinfo(isolate, addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
2751 rinfo.Visit(isolate, v);
2752 break;
2753 }
2754 case SlotsBuffer::OBJECT_SLOT: {
2755 v->VisitPointer(reinterpret_cast<Object**>(addr));
2756 break;
2757 }
2758 default:
2759 UNREACHABLE();
2760 break;
2761 }
2762}
2763
2764
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002765// Visitor for updating pointers from live objects in old spaces to new space.
2766// It does not expect to encounter pointers to dead objects.
2767class PointersUpdatingVisitor : public ObjectVisitor {
2768 public:
2769 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2770
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002771 void VisitPointer(Object** p) override { UpdatePointer(p); }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002772
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002773 void VisitPointers(Object** start, Object** end) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002774 for (Object** p = start; p < end; p++) UpdatePointer(p);
2775 }
2776
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002777 void VisitCell(RelocInfo* rinfo) override {
2778 DCHECK(rinfo->rmode() == RelocInfo::CELL);
2779 Object* cell = rinfo->target_cell();
2780 Object* old_cell = cell;
2781 VisitPointer(&cell);
2782 if (cell != old_cell) {
2783 rinfo->set_target_cell(reinterpret_cast<Cell*>(cell));
2784 }
2785 }
2786
2787 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002788 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2789 Object* target = rinfo->target_object();
2790 Object* old_target = target;
2791 VisitPointer(&target);
2792 // Avoid unnecessary changes that might unnecessarily flush the
2793 // instruction cache.
2794 if (target != old_target) {
2795 rinfo->set_target_object(target);
2796 }
2797 }
2798
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002799 void VisitCodeTarget(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002800 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
2801 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2802 Object* old_target = target;
2803 VisitPointer(&target);
2804 if (target != old_target) {
2805 rinfo->set_target_address(Code::cast(target)->instruction_start());
2806 }
2807 }
2808
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002809 void VisitCodeAgeSequence(RelocInfo* rinfo) override {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002810 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2811 Object* stub = rinfo->code_age_stub();
2812 DCHECK(stub != NULL);
2813 VisitPointer(&stub);
2814 if (stub != rinfo->code_age_stub()) {
2815 rinfo->set_code_age_stub(Code::cast(stub));
2816 }
2817 }
2818
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002819 void VisitDebugTarget(RelocInfo* rinfo) override {
2820 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2821 rinfo->IsPatchedDebugBreakSlotSequence());
2822 Object* target =
2823 Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002824 VisitPointer(&target);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002825 rinfo->set_debug_call_address(Code::cast(target)->instruction_start());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002826 }
2827
2828 static inline void UpdateSlot(Heap* heap, Object** slot) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002829 Object* obj = reinterpret_cast<Object*>(
2830 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002831
2832 if (!obj->IsHeapObject()) return;
2833
2834 HeapObject* heap_obj = HeapObject::cast(obj);
2835
2836 MapWord map_word = heap_obj->map_word();
2837 if (map_word.IsForwardingAddress()) {
2838 DCHECK(heap->InFromSpace(heap_obj) ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002839 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2840 Page::FromAddress(heap_obj->address())
2841 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002842 HeapObject* target = map_word.ToForwardingAddress();
Emily Bernierd0a1eb72015-03-24 16:35:39 -04002843 base::NoBarrier_CompareAndSwap(
2844 reinterpret_cast<base::AtomicWord*>(slot),
2845 reinterpret_cast<base::AtomicWord>(obj),
2846 reinterpret_cast<base::AtomicWord>(target));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002847 DCHECK(!heap->InFromSpace(target) &&
2848 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2849 }
2850 }
2851
2852 private:
2853 inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
2854
2855 Heap* heap_;
2856};
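// Note that UpdateSlot() above installs the forwarding target with a relaxed
// compare-and-swap rather than a plain store: the new value is written only if
// the slot still holds the old object pointer, so a slot that was changed to
// something else in the meantime is left untouched.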
2857
2858
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002859void MarkCompactCollector::UpdateSlots(SlotsBuffer* buffer) {
2860 PointersUpdatingVisitor v(heap_);
2861 size_t buffer_size = buffer->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002863 for (size_t slot_idx = 0; slot_idx < buffer_size; ++slot_idx) {
2864 SlotsBuffer::ObjectSlot slot = buffer->Get(slot_idx);
2865 if (!SlotsBuffer::IsTypedSlot(slot)) {
2866 PointersUpdatingVisitor::UpdateSlot(heap_, slot);
2867 } else {
2868 ++slot_idx;
2869 DCHECK(slot_idx < buffer_size);
2870 UpdateSlot(heap_->isolate(), &v, DecodeSlotType(slot),
2871 reinterpret_cast<Address>(buffer->Get(slot_idx)));
2872 }
2873 }
2874}
2875
2876
2877void MarkCompactCollector::UpdateSlotsRecordedIn(SlotsBuffer* buffer) {
2878 while (buffer != NULL) {
2879 UpdateSlots(buffer);
2880 buffer = buffer->next();
2881 }
2882}
2883
2884
2885static void UpdatePointer(HeapObject** address, HeapObject* object) {
2886 MapWord map_word = object->map_word();
2887 // The store buffer can still contain stale pointers in dead large objects.
2888 // Ignore these pointers here.
2889 DCHECK(map_word.IsForwardingAddress() ||
2890 object->GetHeap()->lo_space()->FindPage(
2891 reinterpret_cast<Address>(address)) != NULL);
2892 if (map_word.IsForwardingAddress()) {
2893 // Update the corresponding slot.
2894 *address = map_word.ToForwardingAddress();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002895 }
2896}
2897
2898
2899static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2900 Object** p) {
2901 MapWord map_word = HeapObject::cast(*p)->map_word();
2902
2903 if (map_word.IsForwardingAddress()) {
2904 return String::cast(map_word.ToForwardingAddress());
2905 }
2906
2907 return String::cast(*p);
2908}
2909
2910
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002911bool MarkCompactCollector::IsSlotInBlackObject(Page* p, Address slot,
2912 HeapObject** out_object) {
2913 Space* owner = p->owner();
2914 if (owner == heap_->lo_space() || owner == NULL) {
2915 Object* large_object = heap_->lo_space()->FindObject(slot);
2916 // This object has to exist, otherwise we would not have recorded a slot
2917 // for it.
2918 CHECK(large_object->IsHeapObject());
2919 HeapObject* large_heap_object = HeapObject::cast(large_object);
2920 if (IsMarked(large_heap_object)) {
2921 *out_object = large_heap_object;
2922 return true;
2923 }
2924 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002925 }
2926
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002927 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2928 unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2929 MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2930 MarkBit::CellType* cells = p->markbits()->cells();
2931 Address base_address = p->area_start();
2932 unsigned int base_address_cell_index = Bitmap::IndexToCell(
2933 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
2934
2935 // Check if the slot points to the start of an object. This can happen e.g.
2936 // when we left trim a fixed array. Such slots are invalid and we can remove
2937 // them.
2938 if (index_mask > 1) {
2939 if ((cells[cell_index] & index_mask) != 0 &&
2940 (cells[cell_index] & (index_mask >> 1)) == 0) {
2941 return false;
2942 }
2943 } else {
2944 // Left trimming moves the mark bits so we cannot be in the very first cell.
2945 DCHECK(cell_index != base_address_cell_index);
2946 if ((cells[cell_index] & index_mask) != 0 &&
2947 (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2948 return false;
2949 }
2950 }
2951
2952 // Check if the object is in the current cell.
2953 MarkBit::CellType slot_mask;
2954 if ((cells[cell_index] == 0) ||
2955 (base::bits::CountTrailingZeros32(cells[cell_index]) >
2956 base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2957 // If we are already in the first cell, there is no live object.
2958 if (cell_index == base_address_cell_index) return false;
2959
2960 // If not, find a preceding cell that has a mark bit set.
2961 do {
2962 cell_index--;
2963 } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2964
2965 // The slot must be in a dead object if there are no preceding cells that
2966 // have mark bits set.
2967 if (cells[cell_index] == 0) {
2968 return false;
2969 }
2970
2971 // The object is in a preceding cell. Set the mask to find any object.
2972 slot_mask = ~0u;
2973 } else {
2974 // We are interested in object mark bits right before the slot.
2975 slot_mask = index_mask + (index_mask - 1);
2976 }
2977
2978 MarkBit::CellType current_cell = cells[cell_index];
2979 CHECK(current_cell != 0);
2980
2981 // Find the last live object in the cell.
2982 unsigned int leading_zeros =
2983 base::bits::CountLeadingZeros32(current_cell & slot_mask);
2984 CHECK(leading_zeros != Bitmap::kBitsPerCell);
2985 int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2986
2987 base_address += (cell_index - base_address_cell_index) *
2988 Bitmap::kBitsPerCell * kPointerSize;
2989 Address address = base_address + offset * kPointerSize;
2990 HeapObject* object = HeapObject::FromAddress(address);
2991 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2992 CHECK(object->address() < reinterpret_cast<Address>(slot));
2993 if ((object->address() + kPointerSize) <= slot &&
2994 (object->address() + object->Size()) > slot) {
2995 // If the slot is within the last found object in the cell, the slot is
2996 // in a live object.
2997 // Slots pointing to the first word of an object are invalid and removed.
2998 // This can happen when we move the object header while left trimming.
2999 *out_object = object;
3000 return true;
3001 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003002 return false;
3003}
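// Worked example for the index arithmetic in IsSlotInBlackObject() above,
// with purely illustrative numbers and assuming the 32-bit bitmap cells
// implied by the CountTrailingZeros32/CountLeadingZeros32 calls: for a slot
// with mark_bit_index == 40,
//   cell_index = 40 >> 5            == 1
//   index_mask = 1u << (40 mod 32)  == 1u << 8 == 0x100
//   slot_mask  = index_mask + (index_mask - 1) == 0x1ff
// so slot_mask covers the slot's own mark bit plus all lower bits of the cell,
// i.e. exactly the mark bits at or before the slot.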
3004
3005
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003006bool MarkCompactCollector::IsSlotInBlackObjectSlow(Page* p, Address slot) {
3007 // Large objects are handled separately below by checking the mark bit of
3008 // the containing large object.
3008 Space* owner = p->owner();
3009 if (owner == heap_->lo_space() || owner == NULL) {
3010 Object* large_object = heap_->lo_space()->FindObject(slot);
3011 // This object has to exist, otherwise we would not have recorded a slot
3012 // for it.
3013 CHECK(large_object->IsHeapObject());
3014 HeapObject* large_heap_object = HeapObject::cast(large_object);
3015 if (IsMarked(large_heap_object)) {
3016 return true;
3017 }
3018 return false;
3019 }
3020
3021 LiveObjectIterator<kBlackObjects> it(p);
3022 HeapObject* object = NULL;
3023 while ((object = it.Next()) != NULL) {
3024 int size = object->Size();
3025
3026 if (object->address() > slot) return false;
3027 if (object->address() <= slot && slot < (object->address() + size)) {
3028 return true;
3029 }
3030 }
3031 return false;
3032}
3033
3034
3035bool MarkCompactCollector::IsSlotInLiveObject(Address slot) {
3036 HeapObject* object = NULL;
3037 // The target object is black but we don't know if the source slot is black.
3038 // The source object could have died and the slot could be part of a free
3039 // space. Find out based on mark bits if the slot is part of a live object.
3040 if (!IsSlotInBlackObject(Page::FromAddress(slot), slot, &object)) {
3041 return false;
3042 }
3043
3044 DCHECK(object != NULL);
3045 int offset = static_cast<int>(slot - object->address());
3046 return object->IsValidSlot(offset);
3047}
3048
3049
3050void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot,
3051 HeapObject* object) {
3052 // The target object has to be black.
3053 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3054
3055 // The target object is black but we don't know if the source slot is black.
3056 // The source object could have died and the slot could be part of a free
3057 // space. Use the mark bit iterator to find out about liveness of the slot.
3058 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3059}
3060
3061
3062void MarkCompactCollector::EvacuateNewSpacePrologue() {
3063 // There are soft limits in the allocation code, designed to trigger a
3064 // mark-sweep collection by failing allocations. But since we are already
3065 // in a mark-sweep allocation, there is no sense in trying to trigger one.
3066 AlwaysAllocateScope scope(isolate());
3067
3068 NewSpace* new_space = heap()->new_space();
3069
3070 // Store allocation range before flipping semispaces.
3071 Address from_bottom = new_space->bottom();
3072 Address from_top = new_space->top();
3073
3074 // Flip the semispaces. After flipping, to space is empty, from space has
3075 // live objects.
3076 new_space->Flip();
3077 new_space->ResetAllocationInfo();
3078
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003079 newspace_evacuation_candidates_.Clear();
3080 NewSpacePageIterator it(from_bottom, from_top);
3081 while (it.has_next()) {
3082 newspace_evacuation_candidates_.Add(it.next());
3083 }
3084}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003085
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003086
3087HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
3088 HashMap* local_pretenuring_feedback = new HashMap(
3089 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
3090 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
3091 local_pretenuring_feedback);
3092 // First pass: traverse all objects in the inactive semispace, remove marks,
3093 // migrate live objects and write forwarding addresses. This stage puts
3094 // new entries in the store buffer and may cause some pages to be marked
3095 // scan-on-scavenge.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003096 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
3097 NewSpacePage* p =
3098 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
3099 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
3100 USE(ok);
3101 DCHECK(ok);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003102 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003103 heap_->IncrementPromotedObjectsSize(
3104 static_cast<int>(new_space_visitor.promoted_size()));
3105 heap_->IncrementSemiSpaceCopiedObjectSize(
3106 static_cast<int>(new_space_visitor.semispace_copied_size()));
3107 heap_->IncrementYoungSurvivorsCounter(
3108 static_cast<int>(new_space_visitor.promoted_size()) +
3109 static_cast<int>(new_space_visitor.semispace_copied_size()));
3110 return local_pretenuring_feedback;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003111}
3112
3113
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003114void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3115 SlotsBuffer* evacuation_slots_buffer) {
3116 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3117 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3118}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003119
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003120
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003121int MarkCompactCollector::NumberOfParallelCompactionTasks() {
3122 if (!FLAG_parallel_compaction) return 1;
3123 // Compute the number of needed tasks based on a target compaction time, the
3124 // profiled compaction speed and marked live memory.
3125 //
3126 // The number of parallel compaction tasks is limited by:
3127 // - #evacuation pages
3128 // - (#cores - 1)
3129 // - a hard limit
3130 const double kTargetCompactionTimeInMs = 1;
3131 const int kMaxCompactionTasks = 8;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003132
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003133 intptr_t compaction_speed =
3134 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3135 if (compaction_speed == 0) return 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003136
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003137 intptr_t live_bytes = 0;
3138 for (Page* page : evacuation_candidates_) {
3139 live_bytes += page->LiveBytes();
3140 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003141
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003142 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
3143 const int tasks =
3144 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
3145 kTargetCompactionTimeInMs);
3146 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
3147 const int tasks_capped_cores = Min(cores, tasks_capped_pages);
3148 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
3149 return tasks_capped_hard;
3150}
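// Illustrative sizing example with hypothetical numbers: for 4 MB of live
// bytes on the candidate pages, a profiled compaction speed of 1 MB/ms and the
// 1 ms target, tasks = 1 + 4 = 5. With, say, 10 evacuation candidates and
// 8 logical cores this survives all three caps (min(10, 5), then min(8 - 1, 5),
// then min(kMaxCompactionTasks, 5)), so NumberOfParallelCompactionTasks()
// returns 5; one of those "tasks" is the main thread's own contribution, see
// StartParallelCompaction() below.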
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003151
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003152
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003153void MarkCompactCollector::EvacuatePagesInParallel() {
3154 const int num_pages = evacuation_candidates_.length();
3155 if (num_pages == 0) return;
3156
3157 // Used for trace summary.
3158 intptr_t live_bytes = 0;
3159 intptr_t compaction_speed = 0;
3160 if (FLAG_trace_fragmentation) {
3161 for (Page* page : evacuation_candidates_) {
3162 live_bytes += page->LiveBytes();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003163 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003164 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003165 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003166 const int num_tasks = NumberOfParallelCompactionTasks();
3167
3168 // Set up compaction spaces.
3169 CompactionSpaceCollection** compaction_spaces_for_tasks =
3170 new CompactionSpaceCollection*[num_tasks];
3171 for (int i = 0; i < num_tasks; i++) {
3172 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
3173 }
3174
3175 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3176 num_tasks);
3177 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3178 num_tasks);
3179
3180 uint32_t* task_ids = new uint32_t[num_tasks - 1];
3181 // Kick off parallel tasks.
3182 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
3183 // Wait for unfinished and not-yet-started tasks.
3184 WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
3185 delete[] task_ids;
3186
3187 double compaction_duration = 0.0;
3188 intptr_t compacted_memory = 0;
3189 // Merge back memory (compacted and unused) from compaction spaces.
3190 for (int i = 0; i < num_tasks; i++) {
3191 heap()->old_space()->MergeCompactionSpace(
3192 compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
3193 heap()->code_space()->MergeCompactionSpace(
3194 compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
3195 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
3196 compaction_duration += compaction_spaces_for_tasks[i]->duration();
3197 delete compaction_spaces_for_tasks[i];
3198 }
3199 delete[] compaction_spaces_for_tasks;
3200 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
3201
3202 // Finalize sequentially.
3203 int abandoned_pages = 0;
3204 for (int i = 0; i < num_pages; i++) {
3205 Page* p = evacuation_candidates_[i];
3206 switch (p->parallel_compaction_state().Value()) {
3207 case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3208 // We have partially compacted the page, i.e., some objects may have
3209 // moved, others are still in place.
3210 // We need to:
3211 // - Leave the evacuation candidate flag for later processing of
3212 // slots buffer entries.
3213 // - Leave the slots buffer there for processing of entries added by
3214 // the write barrier.
3215 // - Rescan the page as slot recording in the migration buffer only
3216 // happens upon moving (which we potentially didn't do).
3217 // - Leave the page in the list of pages of a space since we could not
3218 // fully evacuate it.
3219 // - Mark the page for rescanning for store buffer entries as we otherwise
3220 // might have stale store buffer entries that become "valid" again
3221 // after reusing the memory. Note that all existing store buffer
3222 // entries of such pages are filtered before rescanning.
3223 DCHECK(p->IsEvacuationCandidate());
3224 p->SetFlag(Page::COMPACTION_WAS_ABORTED);
3225 p->set_scan_on_scavenge(true);
3226 abandoned_pages++;
3227 break;
3228 case MemoryChunk::kCompactingFinalize:
3229 DCHECK(p->IsEvacuationCandidate());
3230 p->SetWasSwept();
3231 p->Unlink();
3232 break;
3233 case MemoryChunk::kCompactingDone:
3234 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3235 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3236 break;
3237 default:
3238 // We should not observe kCompactingInProgress here; all other states
3239 // are handled above.
3239 UNREACHABLE();
3240 }
3241 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3242 }
3243 if (FLAG_trace_fragmentation) {
3244 PrintIsolate(isolate(),
3245 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3246 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3247 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3248 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3249 num_pages, abandoned_pages, num_tasks,
3250 base::SysInfo::NumberOfProcessors(), live_bytes,
3251 compaction_speed);
3252 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003253}
3254
3255
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003256void MarkCompactCollector::StartParallelCompaction(
3257 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
3258 int len) {
3259 compaction_in_progress_ = true;
3260 for (int i = 1; i < len; i++) {
3261 CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
3262 task_ids[i - 1] = task->id();
3263 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3264 task, v8::Platform::kShortRunningTask);
3265 }
3266
3267 // Contribute in main thread.
3268 EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
3269}
3270
3271
3272void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
3273 int len) {
3274 // Try to cancel compaction tasks that have not been run (as they might be
3275 // stuck in a worker queue). Tasks that cannot be canceled have either
3276 // already completed or are still running; hence we need to wait for their
3277 // semaphore signal.
3278 for (int i = 0; i < len; i++) {
3279 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
3280 pending_compaction_tasks_semaphore_.Wait();
3281 }
3282 }
3283 compaction_in_progress_ = false;
3284}
3285
3286
3287void MarkCompactCollector::EvacuatePages(
3288 CompactionSpaceCollection* compaction_spaces,
3289 SlotsBuffer** evacuation_slots_buffer) {
3290 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
3291 evacuation_slots_buffer);
3292 for (int i = 0; i < evacuation_candidates_.length(); i++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003293 Page* p = evacuation_candidates_[i];
3294 DCHECK(p->IsEvacuationCandidate() ||
3295 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003296 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
3297 MemoryChunk::kSweepingDone);
3298 if (p->parallel_compaction_state().TrySetValue(
3299 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3300 if (p->IsEvacuationCandidate()) {
3301 DCHECK_EQ(p->parallel_compaction_state().Value(),
3302 MemoryChunk::kCompactingInProgress);
3303 double start = heap()->MonotonicallyIncreasingTimeInMs();
3304 intptr_t live_bytes = p->LiveBytes();
3305 AlwaysAllocateScope always_allocate(isolate());
3306 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
3307 p->ResetLiveBytes();
3308 p->parallel_compaction_state().SetValue(
3309 MemoryChunk::kCompactingFinalize);
3310 compaction_spaces->ReportCompactionProgress(
3311 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
3312 } else {
3313 p->parallel_compaction_state().SetValue(
3314 MemoryChunk::kCompactingAborted);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003315 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003316 } else {
3317 // There could be popular pages in the list of evacuation candidates
3318 // which we do not compact.
3319 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003320 }
3321 }
3322 }
3323}
3324
3325
3326class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3327 public:
3328 virtual Object* RetainAs(Object* object) {
3329 if (object->IsHeapObject()) {
3330 HeapObject* heap_object = HeapObject::cast(object);
3331 MapWord map_word = heap_object->map_word();
3332 if (map_word.IsForwardingAddress()) {
3333 return map_word.ToForwardingAddress();
3334 }
3335 }
3336 return object;
3337 }
3338};
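// EvacuationWeakObjectRetainer is passed to Heap::ProcessAllWeakReferences()
// in UpdatePointersAfterEvacuation() below: weak references to objects that
// were moved are redirected to their forwarding address, everything else is
// retained unchanged.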
3339
3340
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003341enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
3342
3343
3344enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
3345
3346
3347enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
3348
3349
3350template <MarkCompactCollector::SweepingParallelism mode>
3351static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
3352 int size) {
3353 if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
3354 DCHECK(free_list == NULL);
3355 return space->Free(start, size);
3356 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003357 return size - free_list->Free(start, size);
3358 }
3359}
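// When sweeping runs in parallel (SWEEP_IN_PARALLEL), freed ranges are
// accumulated in a thread-private free list and only later concatenated into
// the space's free list (see SweepInParallel() below); the main-thread variant
// returns the memory to the space directly.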
3360
3361
3362// Sweeps a page. After sweeping the page can be iterated.
3363// Slots in live objects pointing into evacuation candidates are updated
3364// if requested.
3365// Returns the size of the biggest continuous freed memory chunk in bytes.
3366template <SweepingMode sweeping_mode,
3367 MarkCompactCollector::SweepingParallelism parallelism,
3368 SkipListRebuildingMode skip_list_mode,
3369 FreeSpaceTreatmentMode free_space_mode>
3370static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3371 ObjectVisitor* v) {
3372 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
3373 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3374 space->identity() == CODE_SPACE);
3375 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3376 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3377 sweeping_mode == SWEEP_ONLY);
3378
3379 Address free_start = p->area_start();
3380 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003381
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003382 // If we use the skip list for code space pages, we have to lock the skip
3383 // list because it could be accessed concurrently by the runtime or the
3384 // deoptimizer.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003385 SkipList* skip_list = p->skip_list();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003386 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3387 skip_list->Clear();
3388 }
3389
3390 intptr_t freed_bytes = 0;
3391 intptr_t max_freed_bytes = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003392 int curr_region = -1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003393
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003394 LiveObjectIterator<kBlackObjects> it(p);
3395 HeapObject* object = NULL;
3396 while ((object = it.Next()) != NULL) {
3397 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3398 Address free_end = object->address();
3399 if (free_end != free_start) {
3400 int size = static_cast<int>(free_end - free_start);
3401 if (free_space_mode == ZAP_FREE_SPACE) {
3402 memset(free_start, 0xcc, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003403 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003404 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3405 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003406 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003407 Map* map = object->synchronized_map();
3408 int size = object->SizeFromMap(map);
3409 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3410 object->IterateBody(map->instance_type(), size, v);
3411 }
3412 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3413 int new_region_start = SkipList::RegionNumber(free_end);
3414 int new_region_end =
3415 SkipList::RegionNumber(free_end + size - kPointerSize);
3416 if (new_region_start != curr_region || new_region_end != curr_region) {
3417 skip_list->AddObject(free_end, size);
3418 curr_region = new_region_end;
3419 }
3420 }
3421 free_start = free_end + size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003422 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003423
3424 // Clear the mark bits of that page and reset live bytes count.
3425 Bitmap::Clear(p);
3426
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003427 if (free_start != p->area_end()) {
3428 int size = static_cast<int>(p->area_end() - free_start);
3429 if (free_space_mode == ZAP_FREE_SPACE) {
3430 memset(free_start, 0xcc, size);
3431 }
3432 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3433 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003434 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003435
3436 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3437 // When concurrent sweeping is active, the page will be marked after
3438 // sweeping by the main thread.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003439 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003440 } else {
3441 p->SetWasSwept();
3442 }
3443 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3444}
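// The value returned by Sweep() is the size of the largest contiguous freed
// block, passed through FreeList::GuaranteedAllocatable() so it reflects what
// the free list can actually serve again. Callers such as SweepInParallel()
// compare it against required_freed_bytes to decide whether enough memory has
// been reclaimed and sweeping can stop early.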
3445
3446
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003447void MarkCompactCollector::InvalidateCode(Code* code) {
3448 if (heap_->incremental_marking()->IsCompacting() &&
3449 !ShouldSkipEvacuationSlotRecording(code)) {
3450 DCHECK(compacting_);
3451
3452 // If the object is white, then no slots were recorded on it yet.
3453 MarkBit mark_bit = Marking::MarkBitFrom(code);
3454 if (Marking::IsWhite(mark_bit)) return;
3455
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003456 // Ignore all slots that might have been recorded in the body of the
3457 // deoptimized code object. Assumption: no slots will be recorded for
3458 // this object after invalidating it.
3459 RemoveObjectSlots(code->instruction_start(),
3460 code->address() + code->Size());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003461 }
3462}
3463
3464
3465// Return true if the given code is deoptimized or will be deoptimized.
3466bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3467 return code->is_optimized_code() && code->marked_for_deoptimization();
3468}
3469
3470
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003471void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3472 Address end_slot) {
3473 // Remove entries by replacing them with an old-space slot containing a smi
3474 // that is located in an unmovable page.
3475 int npages = evacuation_candidates_.length();
3476 for (int i = 0; i < npages; i++) {
3477 Page* p = evacuation_candidates_[i];
3478 DCHECK(p->IsEvacuationCandidate() ||
3479 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3480 if (p->IsEvacuationCandidate()) {
3481 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3482 end_slot);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003483 }
3484 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003485}
3486
3487
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003488#ifdef VERIFY_HEAP
3489static void VerifyAllBlackObjects(MemoryChunk* page) {
3490 LiveObjectIterator<kAllLiveObjects> it(page);
3491 HeapObject* object = NULL;
3492 while ((object = it.Next()) != NULL) {
3493 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3494 }
3495}
3496#endif // VERIFY_HEAP
3497
3498
3499bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
3500 HeapObjectVisitor* visitor,
3501 IterationMode mode) {
3502#ifdef VERIFY_HEAP
3503 VerifyAllBlackObjects(page);
3504#endif // VERIFY_HEAP
3505
3506 LiveObjectIterator<kBlackObjects> it(page);
3507 HeapObject* object = nullptr;
3508 while ((object = it.Next()) != nullptr) {
3509 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3510 if (!visitor->Visit(object)) {
3511 if (mode == kClearMarkbits) {
3512 page->markbits()->ClearRange(
3513 page->AddressToMarkbitIndex(page->area_start()),
3514 page->AddressToMarkbitIndex(object->address()));
3515 RecomputeLiveBytes(page);
3516 }
3517 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003518 }
3519 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003520 if (mode == kClearMarkbits) {
3521 Bitmap::Clear(page);
3522 }
3523 return true;
3524}
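// If the visitor bails out on some object (as happens when evacuation of a
// page is aborted, see EvacuatePages() above), the kClearMarkbits mode above
// erases the mark bits of the objects that were already visited and recomputes
// the page's live bytes, leaving the partially evacuated page with a marking
// state that is consistent with the objects still on it.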
3525
3526
3527void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3528 LiveObjectIterator<kBlackObjects> it(page);
3529 int new_live_size = 0;
3530 HeapObject* object = nullptr;
3531 while ((object = it.Next()) != nullptr) {
3532 new_live_size += object->Size();
3533 }
3534 page->SetLiveBytes(new_live_size);
3535}
3536
3537
3538void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3539 ObjectVisitor* visitor) {
3540#ifdef VERIFY_HEAP
3541 VerifyAllBlackObjects(page);
3542#endif // VERIFY_HEAP
3543
3544 LiveObjectIterator<kBlackObjects> it(page);
3545 HeapObject* object = NULL;
3546 while ((object = it.Next()) != NULL) {
3547 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3548 Map* map = object->synchronized_map();
3549 int size = object->SizeFromMap(map);
3550 object->IterateBody(map->instance_type(), size, visitor);
3551 }
3552}
3553
3554
3555void MarkCompactCollector::SweepAbortedPages() {
3556 // Second pass on aborted pages.
3557 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3558 Page* p = evacuation_candidates_[i];
3559 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3560 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3561 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3562 switch (space->identity()) {
3563 case OLD_SPACE:
3564 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3565 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3566 break;
3567 case CODE_SPACE:
3568 if (FLAG_zap_code_space) {
3569 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3570 ZAP_FREE_SPACE>(space, NULL, p, nullptr);
3571 } else {
3572 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3573 IGNORE_FREE_SPACE>(space, NULL, p, nullptr);
3574 }
3575 break;
3576 default:
3577 UNREACHABLE();
3578 break;
3579 }
3580 }
3581 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003582}
3583
3584
3585void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003586 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003587 Heap::RelocationLock relocation_lock(heap());
3588
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003589 HashMap* local_pretenuring_feedback = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003590 {
3591 GCTracer::Scope gc_scope(heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003592 GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3593 EvacuationScope evacuation_scope(this);
3594 EvacuateNewSpacePrologue();
3595 local_pretenuring_feedback = EvacuateNewSpaceInParallel();
3596 heap_->new_space()->set_age_mark(heap_->new_space()->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003597 }
3598
3599 {
3600 GCTracer::Scope gc_scope(heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003601 GCTracer::Scope::MC_EVACUATE_CANDIDATES);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003602 EvacuationScope evacuation_scope(this);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003603 EvacuatePagesInParallel();
3604 }
3605
3606 {
3607 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
3608 delete local_pretenuring_feedback;
3609 }
3610
3611 UpdatePointersAfterEvacuation();
3612
3613 {
3614 GCTracer::Scope gc_scope(heap()->tracer(),
3615 GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3616 // After updating all pointers, we can finally sweep the aborted pages,
3617 // effectively overriding any forward pointers.
3618 SweepAbortedPages();
3619
3620 // EvacuateNewSpaceAndCandidates iterates over new space objects and for
3621 // ArrayBuffers either re-registers them as live or promotes them. This is
3622 // needed to properly free them.
3623 heap()->array_buffer_tracker()->FreeDead(false);
3624
3625 // Deallocate evacuated candidate pages.
3626 ReleaseEvacuationCandidates();
3627 }
3628
3629#ifdef VERIFY_HEAP
3630 if (FLAG_verify_heap && !sweeping_in_progress_) {
3631 VerifyEvacuation(heap());
3632 }
3633#endif
3634}
3635
3636
3637void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3638 GCTracer::Scope gc_scope(heap()->tracer(),
3639 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3640 {
3641 GCTracer::Scope gc_scope(
3642 heap()->tracer(),
3643 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3644 UpdateSlotsRecordedIn(migration_slots_buffer_);
3645 if (FLAG_trace_fragmentation_verbose) {
3646 PrintF(" migration slots buffer: %d\n",
3647 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3648 }
3649 slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
3650 DCHECK(migration_slots_buffer_ == NULL);
3651
3652 // TODO(hpayer): Process the slots buffers in parallel. This has to be done
3653 // after evacuation of all pages finishes.
3654 int buffers = evacuation_slots_buffers_.length();
3655 for (int i = 0; i < buffers; i++) {
3656 SlotsBuffer* buffer = evacuation_slots_buffers_[i];
3657 UpdateSlotsRecordedIn(buffer);
3658 slots_buffer_allocator_->DeallocateChain(&buffer);
3659 }
3660 evacuation_slots_buffers_.Rewind(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003661 }
3662
3663 // Second pass: find pointers to new space and update them.
3664 PointersUpdatingVisitor updating_visitor(heap());
3665
3666 {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003667 GCTracer::Scope gc_scope(
3668 heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003669 // Update pointers in to space.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003670 SemiSpaceIterator to_it(heap()->new_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003671 for (HeapObject* object = to_it.Next(); object != NULL;
3672 object = to_it.Next()) {
3673 Map* map = object->map();
3674 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
3675 &updating_visitor);
3676 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003677 // Update roots.
3678 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003679
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003680 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3681 &Heap::ScavengeStoreBufferCallback);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003682 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003683 }
3684
3685 int npages = evacuation_candidates_.length();
3686 {
3687 GCTracer::Scope gc_scope(
3688 heap()->tracer(),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003689 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003690 for (int i = 0; i < npages; i++) {
3691 Page* p = evacuation_candidates_[i];
3692 DCHECK(p->IsEvacuationCandidate() ||
3693 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3694
3695 if (p->IsEvacuationCandidate()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003696 UpdateSlotsRecordedIn(p->slots_buffer());
3697 if (FLAG_trace_fragmentation_verbose) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003698 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3699 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3700 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003701 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003702
3703 // Important: skip list should be cleared only after roots were updated
3704 // because root iteration traverses the stack and might have to find
3705 // code objects from a non-updated pc pointing into an evacuation candidate.
3706 SkipList* list = p->skip_list();
3707 if (list != NULL) list->Clear();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003708
3709 // First pass on aborted pages, fixing up all live objects.
3710 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3711 p->ClearEvacuationCandidate();
3712 VisitLiveObjectsBody(p, &updating_visitor);
3713 }
3714 }
3715
3716 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003717 if (FLAG_gc_verbose) {
3718 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3719 reinterpret_cast<intptr_t>(p));
3720 }
3721 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3722 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3723
3724 switch (space->identity()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003725 case OLD_SPACE:
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003726 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3727 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3728 &updating_visitor);
3729 break;
3730 case CODE_SPACE:
3731 if (FLAG_zap_code_space) {
3732 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3733 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
3734 &updating_visitor);
3735 } else {
3736 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3737 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3738 &updating_visitor);
3739 }
3740 break;
3741 default:
3742 UNREACHABLE();
3743 break;
3744 }
3745 }
3746 }
3747 }
3748
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003749 {
3750 GCTracer::Scope gc_scope(heap()->tracer(),
3751 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3752 heap_->string_table()->Iterate(&updating_visitor);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003753
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003754 // Update pointers from external string table.
3755 heap_->UpdateReferencesInExternalStringTable(
3756 &UpdateReferenceInExternalStringTableEntry);
3757
3758 EvacuationWeakObjectRetainer evacuation_object_retainer;
3759 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003760 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003761}
3762
3763
3764void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3765 int npages = evacuation_candidates_.length();
3766 for (int i = 0; i < npages; i++) {
3767 Page* p = evacuation_candidates_[i];
3768 if (!p->IsEvacuationCandidate()) continue;
3769 p->Unlink();
3770 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3771 p->InsertAfter(space->LastPage());
3772 }
3773}
3774
3775
3776void MarkCompactCollector::ReleaseEvacuationCandidates() {
3777 int npages = evacuation_candidates_.length();
3778 for (int i = 0; i < npages; i++) {
3779 Page* p = evacuation_candidates_[i];
3780 if (!p->IsEvacuationCandidate()) continue;
3781 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3782 space->Free(p->area_start(), p->area_size());
3783 p->set_scan_on_scavenge(false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003784 p->ResetLiveBytes();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003785 CHECK(p->WasSwept());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003786 space->ReleasePage(p);
3787 }
3788 evacuation_candidates_.Rewind(0);
3789 compacting_ = false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003790 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003791 heap()->FreeQueuedChunks();
3792}
3793
3794
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003795int MarkCompactCollector::SweepInParallel(PagedSpace* space,
3796 int required_freed_bytes) {
3797 int max_freed = 0;
3798 int max_freed_overall = 0;
3799 PageIterator it(space);
3800 while (it.has_next()) {
3801 Page* p = it.next();
3802 max_freed = SweepInParallel(p, space);
3803 DCHECK(max_freed >= 0);
3804 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
3805 return max_freed;
3806 }
3807 max_freed_overall = Max(max_freed, max_freed_overall);
3808 if (p == space->end_of_unswept_pages()) break;
3809 }
3810 return max_freed_overall;
3811}
3812
3813
3814int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
3815 int max_freed = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003816 if (page->TryLock()) {
3817 // If this page was already swept in the meantime, we can return here.
3818 if (page->parallel_sweeping_state().Value() !=
3819 MemoryChunk::kSweepingPending) {
3820 page->mutex()->Unlock();
3821 return 0;
3822 }
3823 page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
3824 FreeList* free_list;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003825 FreeList private_free_list(space);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003826 if (space->identity() == OLD_SPACE) {
3827 free_list = free_list_old_space_.get();
3828 max_freed =
3829 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3830 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3831 } else if (space->identity() == CODE_SPACE) {
3832 free_list = free_list_code_space_.get();
3833 max_freed =
3834 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3835 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3836 } else {
3837 free_list = free_list_map_space_.get();
3838 max_freed =
3839 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3840 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3841 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003842 free_list->Concatenate(&private_free_list);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003843 page->mutex()->Unlock();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003844 }
3845 return max_freed;
3846}
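// The TryLock() above is what keeps concurrent sweeper tasks and the main
// thread from sweeping the same page twice: whoever acquires the page mutex
// first re-checks the kSweepingPending state and sweeps, while a thread that
// fails to acquire the lock (or finds the page no longer pending) simply skips
// it and reports zero freed bytes.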
3847
3848
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003849void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003850 space->ClearStats();
3851
3852 // We defensively initialize end_of_unswept_pages_ here with the first page
3853 // of the pages list.
3854 space->set_end_of_unswept_pages(space->FirstPage());
3855
3856 PageIterator it(space);
3857
3858 int pages_swept = 0;
3859 bool unused_page_present = false;
3860 bool parallel_sweeping_active = false;
3861
3862 while (it.has_next()) {
3863 Page* p = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003864 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003865
3866 // Clear sweeping flags indicating that marking bits are still intact.
3867 p->ClearWasSwept();
3868
3869 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
3870 p->IsEvacuationCandidate()) {
3871 // Will be processed in EvacuateNewSpaceAndCandidates.
3872 DCHECK(evacuation_candidates_.length() > 0);
3873 continue;
3874 }
3875
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003876 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3877 // We need to sweep the page to get it into an iterable state again. Note
3878 // that this adds unusable memory to the free list, which is later dropped
3879 // from the free list again. Since we only use the flag for testing, this
3880 // is fine.
3881 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3882 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3883 continue;
3884 }
3885
3886 // One unused page is kept; all further unused pages are released before sweeping them.
3887 if (p->LiveBytes() == 0) {
3888 if (unused_page_present) {
3889 if (FLAG_gc_verbose) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003890 PrintIsolate(isolate(), "sweeping: released page: %p", p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003891 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003892 space->ReleasePage(p);
3893 continue;
3894 }
3895 unused_page_present = true;
3896 }
3897
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003898 if (!parallel_sweeping_active) {
3899 if (FLAG_gc_verbose) {
3900 PrintIsolate(isolate(), "sweeping: %p", p);
3901 }
3902 if (space->identity() == CODE_SPACE) {
3903 if (FLAG_zap_code_space) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003904 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3905 ZAP_FREE_SPACE>(space, NULL, p, NULL);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003906 } else {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003907 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3908 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003909 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003910 } else {
3911 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3912 IGNORE_FREE_SPACE>(space, NULL, p, NULL);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003913 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003914 pages_swept++;
3915 parallel_sweeping_active = true;
3916 } else {
3917 if (FLAG_gc_verbose) {
3918 PrintIsolate(isolate(), "sweeping: initialized for parallel: %p", p);
3919 }
3920 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
3921 int to_sweep = p->area_size() - p->LiveBytes();
3922 space->accounting_stats_.ShrinkSpace(to_sweep);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003923 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003924 space->set_end_of_unswept_pages(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003925 }
3926
3927 if (FLAG_gc_verbose) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003928 PrintIsolate(isolate(), "sweeping: space=%s pages_swept=%d",
3929 AllocationSpaceName(space->identity()), pages_swept);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003930 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003931}
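// Sweeping policy implemented above: the first page that actually needs
// sweeping is swept immediately on the main thread (presumably so some swept
// memory is available to the allocator right away), while every later page is
// only marked kSweepingPending, with the space's accounting shrunk up front by
// the bytes that sweeping will free; the real work is left to the concurrent
// sweeper tasks or to SweepInParallel() on demand.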
3932
3933
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003934void MarkCompactCollector::SweepSpaces() {
3935 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3936 double start_time = 0.0;
3937 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003938 start_time = heap_->MonotonicallyIncreasingTimeInMs();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003939 }
3940
3941#ifdef DEBUG
3942 state_ = SWEEP_SPACES;
3943#endif
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003944
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003945 MoveEvacuationCandidatesToEndOfPagesList();
3946
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003947 {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003948 sweeping_in_progress_ = true;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003949 {
3950 GCTracer::Scope sweep_scope(heap()->tracer(),
3951 GCTracer::Scope::MC_SWEEP_OLD);
3952 StartSweepSpace(heap()->old_space());
3953 }
3954 {
3955 GCTracer::Scope sweep_scope(heap()->tracer(),
3956 GCTracer::Scope::MC_SWEEP_CODE);
3957 StartSweepSpace(heap()->code_space());
3958 }
3959 {
3960 GCTracer::Scope sweep_scope(heap()->tracer(),
3961 GCTracer::Scope::MC_SWEEP_MAP);
3962 StartSweepSpace(heap()->map_space());
3963 }
Emily Bernierd0a1eb72015-03-24 16:35:39 -04003964 if (FLAG_concurrent_sweeping) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003965 StartSweeperThreads();
3966 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003967 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003968
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003969 // Deallocate unmarked large objects.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003970 heap_->lo_space()->FreeUnmarkedObjects();
3971
3972 // Give pages that are queued to be freed back to the OS. Invalid store
3973 // buffer entries have already been filtered out, so we can just release the memory.
3974 heap()->FreeQueuedChunks();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003975
3976 if (FLAG_print_cumulative_gc_stat) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003977 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003978 start_time);
3979 }
3980}
3981
3982
3983void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
3984 PageIterator it(space);
3985 while (it.has_next()) {
3986 Page* p = it.next();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003987 if (p->parallel_sweeping_state().Value() ==
3988 MemoryChunk::kSweepingFinalize) {
3989 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003990 p->SetWasSwept();
3991 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003992 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003993 }
3994}
3995
3996
3997void MarkCompactCollector::ParallelSweepSpacesComplete() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003998 ParallelSweepSpaceComplete(heap()->old_space());
3999 ParallelSweepSpaceComplete(heap()->code_space());
4000 ParallelSweepSpaceComplete(heap()->map_space());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004001}
4002
4003
4004// TODO(1466) ReportDeleteIfNeeded is not called currently.
4005// Our profiling tools do not expect intersections between
4006// code objects. We should either reenable it or change our tools.
4007void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
4008 Isolate* isolate) {
4009 if (obj->IsCode()) {
4010 PROFILE(isolate, CodeDeleteEvent(obj->address()));
4011 }
4012}
4013
4014
4015Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4016
4017
4018void MarkCompactCollector::Initialize() {
4019 MarkCompactMarkingVisitor::Initialize();
4020 IncrementalMarking::Initialize();
4021}
4022
4023
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004024void MarkCompactCollector::EvictPopularEvacuationCandidate(Page* page) {
4025 if (FLAG_trace_fragmentation) {
4026 PrintF("Page %p is too popular. Disabling evacuation.\n",
4027 reinterpret_cast<void*>(page));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004028 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004029
4030 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kSlotsBufferOverflow);
4031
4032 // TODO(gc) If all evacuation candidates are too popular we
4033 // should stop slots recording entirely.
4034 page->ClearEvacuationCandidate();
4035
4036 DCHECK(!page->IsFlagSet(Page::POPULAR_PAGE));
4037 page->SetFlag(Page::POPULAR_PAGE);
4038
4039 // We were not collecting slots on this page that point
4040 // to other evacuation candidates, thus we have to
4041 // rescan the page after evacuation to discover and update all
4042 // pointers to evacuated objects.
4043 page->SetFlag(Page::RESCAN_ON_EVACUATION);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004044}
4045
4046
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004047void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* object, Address slot,
4048 Code* target) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004049 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4050 if (target_page->IsEvacuationCandidate() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004051 !ShouldSkipEvacuationSlotRecording(object)) {
4052 if (!SlotsBuffer::AddTo(slots_buffer_allocator_,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004053 target_page->slots_buffer_address(),
4054 SlotsBuffer::CODE_ENTRY_SLOT, slot,
4055 SlotsBuffer::FAIL_ON_OVERFLOW)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004056 EvictPopularEvacuationCandidate(target_page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004057 }
4058 }
4059}
4060
4061
4062void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4063 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4064 if (is_compacting()) {
4065 Code* host =
4066 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
4067 pc);
4068 MarkBit mark_bit = Marking::MarkBitFrom(host);
4069 if (Marking::IsBlack(mark_bit)) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004070 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00004071 RecordRelocSlot(&rinfo, target);
4072 }
4073 }
4074}
4075
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00004076} // namespace internal
4077} // namespace v8