// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

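// An object's color is encoded in two consecutive mark bits: the object's own
// mark bit and the bit following it (mark_bit.Next()). Written as
// (bit, next bit), the patterns tested by the predicates below are:
//   impossible: 01
//   black:      11
//   white:      00
//   grey:       10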
class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
      return true;
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
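// A minimal sketch (for illustration only, not part of the collector) of how
// the predicates and transitions above compose during tri-color marking; the
// object |obj| and the surrounding marking loop are assumed:
//
//   MarkBit mark_bit = Marking::MarkBitFrom(obj);
//   if (Marking::IsWhite(mark_bit)) {
//     Marking::WhiteToGrey(mark_bit);  // Discovered; queue for scanning.
//   }
//   ...
//   Marking::GreyToBlack(mark_bit);  // All fields of |obj| have been visited.
//   DCHECK(Marking::IsBlack(mark_bit));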

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
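// Illustrative (non-normative) sketch of how a client drains the deque, based
// on the Push/Pop/overflowed() contract above; VisitObjectBody is a
// hypothetical visitor:
//
//   while (!deque->IsEmpty()) {
//     HeapObject* object = deque->Pop();
//     VisitObjectBody(object);  // May Push() newly discovered objects.
//   }
//   // A failed Push() has already called SetOverflowed(); overflowed objects
//   // are recovered later by rescanning the heap (see RefillMarkingDeque in
//   // MarkCompactCollector below).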


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
//  - SharedFunctionInfo references unoptimized code.
//  - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
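// Note (inferred from the private accessors above rather than stated
// explicitly): the candidate lists appear to be singly linked through the
// candidates themselves via Get/Set/ClearNextCandidate, with
// jsfunction_candidates_head_ and shared_function_info_candidates_head_ as
// the list heads, so collecting candidates needs no extra allocation during
// marking.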


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell. If there is no next it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
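// Typical iteration pattern (illustrative only):
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     Address cell_base = it.CurrentCellBase();
//     MarkBit::CellType* cell = it.CurrentCell();
//     // Each cell covers 32 mark bits, i.e. 32 * kPointerSize bytes of the
//     // chunk starting at |cell_base| (see Advance()).
//   }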

// Grey objects can occur on black pages when black objects transition to
// grey, e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
    // Black pages cannot be iterated.
    DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
  }

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
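// Illustrative usage; that Next() returns NULL once the page is exhausted is
// assumed from the usual iterator idiom rather than stated here:
//
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = NULL;
//   while ((object = it.Next()) != NULL) {
//     // |object| is a live black object on |chunk|.
//   }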

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
    enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
              SkipListRebuildingMode skip_list_mode,
              FreeListRebuildingMode free_list_mode,
              FreeSpaceTreatmentMode free_space_mode>
    static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          late_pages_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }
    bool contains_late_pages() { return late_pages_; }

    void AddPage(AllocationSpace space, Page* page);
    void AddLatePage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    void StartSweeping();
    void StartSweepingHelper(AllocationSpace space_to_start);
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool IsSweepingCompleted();
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    bool late_pages_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };
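  // Rough sweeper lifecycle, as suggested by the interface above (illustrative
  // rather than a strict protocol): pages are queued with AddPage() or
  // AddLatePage(), StartSweeping() spawns SweeperTasks, allocation paths may
  // help out via ParallelSweepSpace()/ParallelSweepPage(), and
  // EnsureCompleted() joins all outstanding work before the mark bits are
  // needed again.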

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following two methods can only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping
  // when mark bits are still intact.
  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
  HeapObject* FindBlackObjectBySlotSlow(Address slot);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  Sweeper& sweeper() { return sweeper_; }

  void RegisterWrappersWithEmbedderHeapTracer();

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);

  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }

  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }

  void TracePossibleWrapper(JSObject* js_object);

  void RegisterExternallyReferencedObject(Object** object);

 private:
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void ClearInvalidRememberedSetSlots();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups,
  //   implicit reference groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is
  // non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  std::vector<std::pair<void*, void*>> wrappers_to_trace_;

  CodeFlusher* code_flusher_;

  EmbedderHeapTracer* embedder_heap_tracer_;

  List<Page*> evacuation_candidates_;
  List<Page*> newspace_evacuation_candidates_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
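// Illustrative RAII usage; the |collector| pointer is assumed to come from the
// heap that owns it:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true while candidates are evacuated here.
//   }  // evacuation() flips back to false when the scope is left.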


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_