// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

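// Summary of the two-bit mark encoding documented on the predicates below
// (first bit, second bit). In the usual tri-colour reading, white objects are
// unmarked, grey objects have been discovered but not yet scanned, and black
// objects have been fully scanned:
//   00 - white
//   10 - grey
//   11 - black
//   01 - impossible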
class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
      return true;
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
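  // Worked example (illustrative): with mask_ == 7 (an 8-entry ring buffer),
  // bottom_ == 0 and top_ == 6, Push() stores into array_[6] and advances
  // top_ to 7. The deque is then full because ((7 + 1) & 7) == bottom_, so
  // the next Push() sets the overflowed flag and returns false.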
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//  - SharedFunctionInfo references unoptimized code.
//  - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
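// Typical flow (sketch): during marking the collector registers flushing
// candidates via AddCandidate(); after marking, ProcessCandidates() resets the
// collected functions so that their otherwise-unreachable code can be flushed,
// and EvictCandidate() removes a previously registered candidate that should
// no longer be flushed.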
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

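  // Each bitmap cell holds 32 mark bits, one per pointer-size word of the
  // page, which is why Advance() below steps cell_base_ by 32 * kPointerSize.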
  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell. If there is no next cell it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};

// Grey objects can happen on black pages when black objects transition to
// grey, e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

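// Illustrative use of the iterator below (this assumes Next() returns nullptr
// once the live objects on the page are exhausted, which is not spelled out
// in this header):
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   while (HeapObject* object = it.Next()) {
//     // ... process the black object ...
//   }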
template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
    // Black pages can not be iterated.
    DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
  }

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes was set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes was set to zero
  // then the whole given space is swept. It returns the size of the maximum
  // continuous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes,
                      int max_pages = 0);

  // Sweeps a given page concurrently to the sweeper threads. It returns the
  // size of the maximum continuous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  void SweepOrWaitUntilSweepingCompleted(Page* page);

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // If sweeper threads are not active this method will return true. If
  // this is a latency issue we should be smarter here. Otherwise, it will
  // return true if the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit references groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following two methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
  HeapObject* FindBlackObjectBySlotSlow(Address slot);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  base::Mutex* swept_pages_mutex() { return &swept_pages_mutex_; }
  List<Page*>* swept_pages(AllocationSpace id) {
    switch (id) {
      case OLD_SPACE:
        return &swept_old_space_pages_;
      case CODE_SPACE:
        return &swept_code_space_pages_;
      case MAP_SPACE:
        return &swept_map_space_pages_;
      default:
        UNREACHABLE();
    }
    return nullptr;
  }

 private:
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;
  class SweeperTask;

  typedef std::vector<Page*> SweepingList;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void ClearInvalidRememberedSetSlots();

  void StartSweeperThreads();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  //   - Processing of objects reachable through Harmony WeakMaps.
  //   - Objects reachable due to host application logic like object groups
  //     or implicit references' groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  inline SweepingList& sweeping_list(Space* space);

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();
  void EvacuateNewSpaceEpilogue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void SweepAbortedPages();

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<NewSpacePage*> newspace_evacuation_candidates_;

  base::Mutex swept_pages_mutex_;
  List<Page*> swept_old_space_pages_;
  List<Page*> swept_code_space_pages_;
  List<Page*> swept_map_space_pages_;

  SweepingList sweeping_list_old_space_;
  SweepingList sweeping_list_code_space_;
  SweepingList sweeping_list_map_space_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  // Semaphore used to synchronize sweeper tasks.
  base::Semaphore pending_sweeper_tasks_semaphore_;

  // Semaphore used to synchronize compaction tasks.
  base::Semaphore pending_compaction_tasks_semaphore_;

  bool black_allocation_;

  friend class Heap;
  friend class StoreBuffer;
};


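// RAII helper: flags the collector as evacuating for the duration of a scope,
// e.g. (illustrative):
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... move objects while evacuation() is true ...
//   }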
class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_