// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function, returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class SlotsBuffer;
class SlotsBufferAllocator;


class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};
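
// Illustrative sketch (not part of the interface): an object's color is
// encoded in its mark bit plus the bit after it (Next()), as listed above.
// A typical white-to-black transition during full marking therefore looks
// roughly like this:
//
//   MarkBit mark_bit = Marking::MarkBitFrom(object);
//   if (Marking::IsWhite(mark_bit)) {
//     Marking::WhiteToBlack(mark_bit);  // 00 -> 11
//     // ... push `object` onto the marking deque so its fields get traced.
//   }
//
// `object` is a hypothetical HeapObject*; the real call sites live in the
// collector and the incremental marker, and this sketch only uses the
// predicates and transitions declared above.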

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
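
// Illustrative sketch (not part of the interface): the deque is a fixed-size
// ring buffer, so producers must be prepared for Push() to fail. Assuming a
// hypothetical `deque` instance, a marking loop looks roughly like this:
//
//   if (!deque->Push(heap_object)) {
//     // Deque is full and is now flagged as overflowed; the object stays
//     // grey and is rediscovered later when the heap is rescanned.
//   }
//   while (!deque->IsEmpty()) {
//     HeapObject* obj = deque->Pop();
//     // ... visit obj's fields, possibly pushing more grey objects ...
//   }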


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
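
// Illustrative sketch (not part of the interface): during marking the
// collector registers flushing candidates and processes them in one batch
// once marking has completed. Assuming a collector with code flushing
// enabled and hypothetical `shared_info` / `js_function` candidates:
//
//   CodeFlusher* flusher = collector->code_flusher();
//   flusher->AddCandidate(shared_info);  // SharedFunctionInfo candidate.
//   flusher->AddCandidate(js_function);  // JSFunction candidate.
//   // ... marking runs ...
//   flusher->ProcessCandidates();
//
// EvictCandidate() removes a candidate that must not be flushed after all.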


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space,
                     SlotsBuffer** evacuation_slots_buffer,
                     LocalStoreBuffer* local_store_buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes was set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes was set to zero
  // then the whole given space is swept. It returns the size of the maximum
  // contiguous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes,
                      int max_pages = 0);

  // Sweeps a given page concurrently with the sweeper threads. It returns the
  // size of the maximum contiguous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);
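
  // Illustrative sketch (not part of the interface): both overloads let a
  // caller help with sweeping. For example, a caller that needs at least
  // `bytes_needed` bytes of contiguous free memory in a space might do:
  //
  //   int freed = collector->SweepInParallel(space, bytes_needed);
  //   if (freed < bytes_needed) {
  //     // Not enough memory regained; e.g. fall back to
  //     // EnsureSweepingCompleted() on the main thread.
  //   }
  //
  // Here `collector`, `space`, `bytes_needed` and the fallback are
  // hypothetical; passing 0 for required_freed_bytes sweeps the whole space
  // as described above.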

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  void SweepOrWaitUntilSweepingCompleted(Page* page);

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // If sweeper threads are not active this method will return true. If
  // this is a latency issue we should be smarter here. Otherwise, it will
  // return true if the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following four methods must only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  //
  // Free lists filled by sweeper and consumed by corresponding spaces
  // (including compaction spaces).
  //
  base::SmartPointer<FreeList>& free_list_old_space() {
    return free_list_old_space_;
  }
  base::SmartPointer<FreeList>& free_list_code_space() {
    return free_list_code_space_;
  }
  base::SmartPointer<FreeList>& free_list_map_space() {
    return free_list_map_space_;
  }

 private:
  class CompactionTask;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateVisitorBase;
  class Evacuator;
  class HeapObjectVisitor;
  class SweeperTask;

  typedef std::vector<Page*> SweepingList;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void EvictPopularEvacuationCandidate(Page* page);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  SlotsBufferAllocator* slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();
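
  // Illustrative sketch (not part of the interface): ProcessMarkingDeque()
  // is conceptually built from the two helpers above. A plausible drain loop,
  // consistent with the comments here, is:
  //
  //   EmptyMarkingDeque();
  //   while (marking_deque_.overflowed()) {
  //     RefillMarkingDeque();
  //     EmptyMarkingDeque();
  //   }
  //
  // i.e. alternate between draining the deque and rediscovering overflowed
  // grey objects until no overflow remains.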

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and if needed trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  inline SweepingList& sweeping_list(Space* space);

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();
  void EvacuateNewSpaceEpilogue();

  void AddEvacuationSlotsBufferSynchronized(
      SlotsBuffer* evacuation_slots_buffer);

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void StartParallelCompaction(Evacuator** evacuators, int len);
  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void SweepAbortedPages();

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot,
                          SlotsBuffer** evacuation_slots_buffer,
                          LocalStoreBuffer* local_store_buffer);

  // Adds the code entry slot to the slots buffer.
  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
                                   SlotsBuffer** evacuation_slots_buffer);

  // Adds the slot of a moved code object.
  void RecordMigratedCodeObjectSlot(Address code_object,
                                    SlotsBuffer** evacuation_slots_buffer);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<NewSpacePage*> newspace_evacuation_candidates_;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +0000776
777 // The evacuation_slots_buffers_ are used by the compaction threads.
778 // When a compaction task finishes, it uses
779 // AddEvacuationSlotsbufferSynchronized to adds its slots buffer to the
780 // evacuation_slots_buffers_ list using the evacuation_slots_buffers_mutex_
781 // lock.
  base::Mutex evacuation_slots_buffers_mutex_;
  List<SlotsBuffer*> evacuation_slots_buffers_;

  base::SmartPointer<FreeList> free_list_old_space_;
  base::SmartPointer<FreeList> free_list_code_space_;
  base::SmartPointer<FreeList> free_list_map_space_;

  SweepingList sweeping_list_old_space_;
  SweepingList sweeping_list_code_space_;
  SweepingList sweeping_list_map_space_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  // True if parallel compaction is currently in progress.
  bool compaction_in_progress_;

  // Semaphore used to synchronize sweeper tasks.
  base::Semaphore pending_sweeper_tasks_semaphore_;

  // Semaphore used to synchronize compaction tasks.
  base::Semaphore pending_compaction_tasks_semaphore_;

  friend class Heap;
  friend class StoreBuffer;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Return the next mark bit cell. If there is no next cell, it returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
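
// Illustrative sketch (not part of the interface): walking the mark bitmap of
// a chunk one cell at a time. Each cell holds 32 mark bits and covers
// 32 * kPointerSize bytes starting at CurrentCellBase():
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     if (*cell == 0) continue;  // No mark bits set in this cell.
//     Address base = it.CurrentCellBase();
//     // ... decode the set bits in *cell relative to `base` ...
//   }
//
// `chunk` is a hypothetical MemoryChunk*; the decoding step is left abstract.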

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {}

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
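
// Illustrative sketch (not part of the interface): visiting every black
// (marked) object on a chunk, which is the common use of this iterator:
//
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process the live object, e.g. visit its body ...
//   }
//
// `chunk` is a hypothetical MemoryChunk*; kGreyObjects and kAllLiveObjects
// select other subsets of the marked objects.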


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
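
// Illustrative sketch (not part of the interface): EvacuationScope is an RAII
// guard around evacuation work, so the flag is reset even on early returns:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... move objects off evacuation candidates and new space ...
//   }  // collector->evacuation() is false again here.
//
// `collector` is a hypothetical MarkCompactCollector*; any work that must run
// with evacuation() == true belongs inside such a scope.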


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_