// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_MARK_COMPACT_H_
#define V8_MARK_COMPACT_H_

#include "compiler-intrinsics.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in size. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

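// A hypothetical example of a conforming callback (illustrative only; the
// function name below is an assumption, not part of V8):
//
//   static bool IsAliveDefault(HeapObject* obj, int* size, int* offset) {
//     *size = obj->Size();
//     return Marking::IsBlack(Marking::MarkBitFrom(obj));
//   }
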
// Forward declarations.
class CodeFlusher;
class GCTracer;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap)
      : heap_(heap) {
  }

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    return !mark_bit.Get();
  }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    markbit.Next().Set();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  // Returns true if the object whose mark is transferred is marked black.
  bool TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT: return "black";
      case WHITE_OBJECT: return "white";
      case GREY_OBJECT: return "grey";
      case IMPOSSIBLE_COLOR: return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from,
                                   HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};
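
// A minimal sketch of the tri-colour transitions encoded above (illustrative
// only; 'obj' is assumed to be a valid HeapObject*):
//
//   MarkBit bit = Marking::MarkBitFrom(obj);
//   if (Marking::IsWhite(bit)) Marking::WhiteToGrey(bit);  // 00 -> 11
//   if (Marking::IsGrey(bit)) Marking::GreyToBlack(bit);   // 11 -> 10
//   ASSERT(Marking::IsBlack(bit));                         // 10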

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    ASSERT(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    ASSERT(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    ASSERT(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
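
// A usage sketch (illustrative only): marking pushes black objects onto the
// deque and drains it until empty; if the deque overflowed, grey objects are
// later recovered by rescanning the heap (see RefillMarkingDeque below).
//
//   deque.PushBlack(object);
//   while (!deque.IsEmpty()) {
//     HeapObject* obj = deque.Pop();
//     // ... visit obj's fields, pushing newly marked objects ...
//   }
//   if (deque.overflowed()) { /* rescan the heap, then ClearOverflowed() */ }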


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that has to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//    - Untyped slots are expected to contain a tagged object pointer.
//      They are recorded by an address.
//    - Typed slots are expected to contain an encoded pointer to a heap
//      object where the way of encoding depends on the type of the slot.
//      They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped; this allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
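// For example (illustrative only), a raw element can be classified like this:
//
//   ObjectSlot slot = slots_[i];
//   if (reinterpret_cast<intptr_t>(slot) < NUMBER_OF_SLOT_TYPES) {
//     // 'slot' encodes a SlotType; the following element is the address.
//   }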
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {
  }

  void Add(ObjectSlot slot) {
    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() {
    return idx_ == kNumberOfElements;
  }

  inline bool HasSpaceForTypedSlot() {
    return idx_ < kNumberOfElements - 1;
  }

  static void UpdateSlotsRecordedIn(Heap* heap,
                                    SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

  enum AdditionMode {
    FAIL_ON_OVERFLOW,
    IGNORE_OVERFLOW
  };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address,
                           ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address,
                    SlotType type,
                    Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};
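
// A usage sketch (illustrative only; assumes a MemoryChunk-style
// slots_buffer_address() accessor on the page): recording an untyped slot
// and evicting the candidate page when the buffer chain grows too long.
//
//   if (!SlotsBuffer::AddTo(&allocator, page->slots_buffer_address(),
//                           slot, SlotsBuffer::FAIL_ON_OVERFLOW)) {
//     collector->EvictEvacuationCandidate(page);
//   }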


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//    - SharedFunctionInfo references unoptimized code.
//    - JSFunction references either unoptimized or optimized code.
//    - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bailout
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    ASSERT(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
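
// A minimal sketch of the candidate links (illustrative only): the
// next-candidate pointer for a SharedFunctionInfo is threaded through its
// code object's gc_metadata field, so enqueuing needs no extra allocation.
//
//   code_flusher->AddCandidate(shared_info);  // links via gc_metadata
//   // ... marking completes ...
//   code_flusher->ProcessCandidates();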


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Type of functions to compute forwarding addresses of objects in
  // compacted spaces. Given an object and its size, return a (non-failure)
  // Object* that will be the object after forwarding. There is a separate
  // allocation function for each (compactable) space based on the location
  // of the object before compaction.
  typedef MaybeObject* (*AllocationFunction)(Heap* heap,
                                             HeapObject* object,
                                             int object_size);

  // Type of functions to encode the forwarding address for an object.
  // Given the object, its size, and the new (non-failure) object it will be
  // forwarded to, encode the forwarding address. For paged spaces, the
  // 'offset' input/output parameter contains the offset of the forwarded
  // object from the forwarding address of the previous live object in the
  // page as input, and is updated to contain the offset to be used for the
  // next live object in the same page. For spaces using a different
  // encoding (i.e., contiguous spaces), the offset parameter is ignored.
  typedef void (*EncodingFunction)(Heap* heap,
                                   HeapObject* old_object,
                                   int object_size,
                                   Object* new_object,
                                   int* offset);

  // Type of functions to process non-live objects.
  typedef void (*ProcessNonLiveFunction)(HeapObject* object, Isolate* isolate);

  // Pointer to member function, used in IterateLiveObjects.
  typedef int (MarkCompactCollector::*LiveObjectCallback)(HeapObject* obj);

  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare(GCTracer* tracer);

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode {
    INCREMENTAL_COMPACTION,
    NON_INCREMENTAL_COMPACTION
  };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

  // During a full GC, there is a stack-allocated GCTracer that is used for
  // bookkeeping information. Return a pointer to that tracer.
  GCTracer* tracer() { return tracer_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONSERVATIVE,
    LAZY_CONSERVATIVE,
    PARALLEL_CONSERVATIVE,
    CONCURRENT_CONSERVATIVE,
    PRECISE
  };

  enum SweepingParallelism {
    SWEEP_SEQUENTIALLY,
    SWEEP_IN_PARALLEL
  };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInOptimizedCode();
  void VerifyOmittedMapChecks();
#endif

  // Sweep a single page from the given space conservatively.
  // Returns the number of reclaimed bytes.
  template<SweepingParallelism type>
  static intptr_t SweepConservatively(PagedSpace* space,
                                      FreeList* free_list,
                                      Page* p);

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))->
        ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))->
        IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point to other
    // evacuation candidates, so we have to rescan the page after
    // evacuation to discover and update all pointers to evacuated
    // objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(Object** anchor_slot, Object** slot, Object* object));

  void MigrateObject(Address dst,
                     Address src,
                     int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  inline Object* encountered_weak_collections() {
    return encountered_weak_collections_;
  }
  inline void set_encountered_weak_collections(Object* weak_collection) {
    encountered_weak_collections_ = weak_collection;
  }

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support.
  void SweepInParallel(PagedSpace* space,
                       FreeList* private_free_list,
                       FreeList* free_list);

  void WaitUntilSweepingCompleted();

  intptr_t StealMemoryFromSweeperThreads(PagedSpace* space);

  bool AreSweeperThreadsActivated();

  bool IsConcurrentSweepingInProgress();

  void set_sequential_sweeping(bool sequential_sweeping) {
    sequential_sweeping_ = sequential_sweeping;
  }

  bool sequential_sweeping() const {
    return sequential_sweeping_;
  }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

 private:
  MarkCompactCollector();
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void UnlinkEvacuationCandidates();
  void ReleaseEvacuationCandidates();

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  // Global flag that forces sweeping to be precise, so we can traverse the
  // heap.
  bool sweep_precisely_;

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_pending_;

  bool sequential_sweeping_;

  // A pointer to the current stack-allocated GC tracer object during a full
  // collection (NULL before and after).
  GCTracer* tracer_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause, including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // After reachable maps have been marked, process per-context object
  // literal map caches, removing unmarked entries.
  void ProcessMapCaches();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);

  void ClearAndDeoptimizeDependentCode(DependentCode* dependent_code);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);

  // Marking detaches initial maps from SharedFunctionInfo objects
  // to make this reference weak. We need to reattach initial maps
  // after collection. This is done either during the
  // ClearNonLiveTransitions pass or by calling this function.
  void ReattachInitialMaps();

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak
  // collections onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak collection
  // entries with an unreachable key are removed from all encountered weak
  // collections. The linked list of all encountered weak collections is
  // destroyed.
  void ClearWeakCollections();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
                                           NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  Object* encountered_weak_collections_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  friend class Heap;
};
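
// A lifecycle sketch (illustrative only; the collector is owned by the heap,
// and the exact calling sequence here is an assumption based on the public
// interface above):
//
//   collector->SetFlags(flags);      // before Prepare, to take effect
//   collector->Prepare(tracer);
//   collector->CollectGarbage();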


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk)
      : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(
            chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
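
// An iteration sketch (illustrative only): walking a chunk's mark bitmap one
// 32-bit cell at a time.
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     if (*cell != 0) {
//       // Some objects starting in [it.CurrentCellBase(), +32 words) are
//       // marked.
//     }
//   }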


class SequentialSweepingScope BASE_EMBEDDED {
 public:
  explicit SequentialSweepingScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_sequential_sweeping(true);
  }

  ~SequentialSweepingScope() {
    collector_->set_sequential_sweeping(false);
  }

 private:
  MarkCompactCollector* collector_;
};
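
// An RAII usage sketch (illustrative only; assumes access to the heap's
// collector):
//
//   {
//     SequentialSweepingScope scope(heap->mark_compact_collector());
//     // Sweeping triggered in this scope will not run concurrently.
//   }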


const char* AllocationSpaceName(AllocationSpace space);

} }  // namespace v8::internal

#endif  // V8_MARK_COMPACT_H_