// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in *size. It optionally updates the offset to
// the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;


class Marking {
 public:
  explicit Marking(Heap* heap) : heap_(heap) {}

  INLINE(static MarkBit MarkBitFrom(Address addr));

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

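  // Each object carries two adjacent mark bits: the bit returned by
  // MarkBitFrom() and the one at Next(). Read as a pair, they encode the
  // object's color in the usual tri-color scheme (white = unmarked, grey =
  // marked but not yet scanned, black = marked and scanned); the concrete
  // bit patterns are listed below.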
  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 10 - this is required by the sweeper.
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }

  // Grey markbits: 11
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Set();
  }

  void TransferMark(Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
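  // For example, transferring from a black source (10) sets the first bit on
  // the target and returns true; from a grey source (11) both bits are copied
  // and false is returned; from a white source (00) nothing is set.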
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    bool is_black = false;
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      is_black = true;  // Looks black so far.
    }
    if (from_mark_bit.Next().Get()) {
      to_mark_bit.Next().Set();
      is_black = false;  // Was actually gray.
    }
    return is_black;
  }

 private:
  Heap* heap_;
};


// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}

  void Initialize(Address low, Address high) {
    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
    array_ = obj_low;
    mask_ = base::bits::RoundDownToPowerOfTwo32(
                static_cast<uint32_t>(obj_high - obj_low)) -
            1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the (marked) object on the marking stack if there is room,
  // otherwise mark the object as overflowed and wait for a rescan of the
  // heap.
  INLINE(void PushBlack(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      Marking::BlackToGrey(object);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(),
                                            -object->Size());
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(void PushGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  INLINE(void UnshiftGrey(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top - 1) & mask_] is the top element in the deque. The Deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
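  // For example, with mask_ == 7 the deque holds at most 7 objects at a time;
  // pushing onto a full deque does not grow the buffer but sets the overflow
  // flag instead (see PushBlack/PushGrey above).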
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


class SlotsBufferAllocator {
 public:
  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
  void DeallocateBuffer(SlotsBuffer* buffer);

  void DeallocateChain(SlotsBuffer** buffer_address);
};


// SlotsBuffer records a sequence of slots that have to be updated
// after live objects were relocated from evacuation candidates.
// All slots are either untyped or typed:
//  - Untyped slots are expected to contain a tagged object pointer.
//    They are recorded by an address.
//  - Typed slots are expected to contain an encoded pointer to a heap
//    object where the way of encoding depends on the type of the slot.
//    They are recorded as a pair (SlotType, slot address).
// We assume that the zero page is never mapped. This allows us to distinguish
// untyped slots from typed slots during iteration by a simple comparison:
// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
// it is the first element of a typed slot's pair.
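// For example, recording the untyped slot at address A appends the single
// element A, whereas recording a typed CODE_TARGET_SLOT at A appends two
// consecutive elements: CODE_TARGET_SLOT followed by A. This is also why
// HasSpaceForTypedSlot() below requires two free elements.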
class SlotsBuffer {
 public:
  typedef Object** ObjectSlot;

  explicit SlotsBuffer(SlotsBuffer* next_buffer)
      : idx_(0), chain_length_(1), next_(next_buffer) {
    if (next_ != NULL) {
      chain_length_ = next_->chain_length_ + 1;
    }
  }

  ~SlotsBuffer() {}

  void Add(ObjectSlot slot) {
    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
    slots_[idx_++] = slot;
  }

  enum SlotType {
    EMBEDDED_OBJECT_SLOT,
    RELOCATED_CODE_OBJECT,
    CODE_TARGET_SLOT,
    CODE_ENTRY_SLOT,
    DEBUG_TARGET_SLOT,
    JS_RETURN_SLOT,
    NUMBER_OF_SLOT_TYPES
  };

  static const char* SlotTypeToString(SlotType type) {
    switch (type) {
      case EMBEDDED_OBJECT_SLOT:
        return "EMBEDDED_OBJECT_SLOT";
      case RELOCATED_CODE_OBJECT:
        return "RELOCATED_CODE_OBJECT";
      case CODE_TARGET_SLOT:
        return "CODE_TARGET_SLOT";
      case CODE_ENTRY_SLOT:
        return "CODE_ENTRY_SLOT";
      case DEBUG_TARGET_SLOT:
        return "DEBUG_TARGET_SLOT";
      case JS_RETURN_SLOT:
        return "JS_RETURN_SLOT";
      case NUMBER_OF_SLOT_TYPES:
        return "NUMBER_OF_SLOT_TYPES";
    }
    return "UNKNOWN SlotType";
  }

  void UpdateSlots(Heap* heap);

  void UpdateSlotsWithFilter(Heap* heap);

  SlotsBuffer* next() { return next_; }

  static int SizeOfChain(SlotsBuffer* buffer) {
    if (buffer == NULL) return 0;
    return static_cast<int>(buffer->idx_ +
                            (buffer->chain_length_ - 1) * kNumberOfElements);
  }

  inline bool IsFull() { return idx_ == kNumberOfElements; }

  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }

  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
                                    bool code_slots_filtering_required) {
    while (buffer != NULL) {
      if (code_slots_filtering_required) {
        buffer->UpdateSlotsWithFilter(heap);
      } else {
        buffer->UpdateSlots(heap);
      }
      buffer = buffer->next();
    }
  }

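  // FAIL_ON_OVERFLOW makes AddTo() give up (deallocating the whole chain)
  // once the chain length threshold is reached, while IGNORE_OVERFLOW keeps
  // allocating new buffers regardless (see AddTo() below).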
  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };

  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
  }

  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
                           SlotsBuffer** buffer_address, ObjectSlot slot,
                           AdditionMode mode)) {
    SlotsBuffer* buffer = *buffer_address;
    if (buffer == NULL || buffer->IsFull()) {
      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
        allocator->DeallocateChain(buffer_address);
        return false;
      }
      buffer = allocator->AllocateBuffer(buffer);
      *buffer_address = buffer;
    }
    buffer->Add(slot);
    return true;
  }

  static bool IsTypedSlot(ObjectSlot slot);

  static bool AddTo(SlotsBufferAllocator* allocator,
                    SlotsBuffer** buffer_address, SlotType type, Address addr,
                    AdditionMode mode);

  static const int kNumberOfElements = 1021;

 private:
  static const int kChainLengthThreshold = 15;

  intptr_t idx_;
  intptr_t chain_length_;
  SlotsBuffer* next_;
  ObjectSlot slots_[kNumberOfElements];
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in three ways:
//  - SharedFunctionInfo references unoptimized code.
//  - JSFunction references either unoptimized or optimized code.
//  - OptimizedCodeMap references optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
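// The candidate lists below are threaded through the candidate objects
// themselves: JSFunction candidates via their next-function-link field,
// SharedFunctionInfo candidates via the gc_metadata field of their code, and
// optimized code map holders via the kNextMapIndex entry of the code map
// (see the Get/Set/ClearNext* helpers further down).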
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL),
        optimized_code_map_holder_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    if (GetNextCandidate(shared_info) == NULL) {
      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
      shared_function_info_candidates_head_ = shared_info;
    }
  }

  void AddCandidate(JSFunction* function) {
    DCHECK(function->code() == function->shared()->code());
    if (GetNextCandidate(function)->IsUndefined()) {
      SetNextCandidate(function, jsfunction_candidates_head_);
      jsfunction_candidates_head_ = function;
    }
  }

  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
      optimized_code_map_holder_head_ = code_map_holder;
    }
  }

  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessOptimizedCodeMaps();
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void EvictAllCandidates() {
    EvictOptimizedCodeMaps();
    EvictJSFunctionCandidates();
    EvictSharedFunctionInfoCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessOptimizedCodeMaps();
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();
  void EvictOptimizedCodeMaps();
  void EvictJSFunctionCandidates();
  void EvictSharedFunctionInfoCandidates();

  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    Object* next_candidate = candidate->next_function_link();
    return reinterpret_cast<JSFunction*>(next_candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    candidate->set_next_function_link(next_candidate);
  }

  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
    DCHECK(undefined->IsUndefined());
    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    Object* next_candidate = candidate->code()->gc_metadata();
    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
  }

  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
    return reinterpret_cast<SharedFunctionInfo*>(next_map);
  }

  static void SetNextCodeMap(SharedFunctionInfo* holder,
                             SharedFunctionInfo* next_holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
  }

  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;
  SharedFunctionInfo* optimized_code_map_holder_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  // Set the global flags; this must be called before Prepare to take effect.
  inline void SetFlags(int flags);

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether we are performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
  void EnableCodeFlushing(bool enable);

  enum SweeperType {
    CONCURRENT_SWEEPING,
    SEQUENTIAL_SWEEPING
  };

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
    return Page::FromAddress(reinterpret_cast<Address>(anchor))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  INLINE(void EvictEvacuationCandidate(Page* page)) {
    if (FLAG_trace_fragmentation) {
      PrintF("Page %p is too popular. Disabling evacuation.\n",
             reinterpret_cast<void*>(page));
    }

    // TODO(gc) If all evacuation candidates are too popular we
    // should stop slots recording entirely.
    page->ClearEvacuationCandidate();

    // We were not collecting slots on this page that point
    // to other evacuation candidates, thus we have to
    // rescan the page after evacuation to discover and update all
    // pointers to evacuated objects.
    if (page->owner()->identity() == OLD_DATA_SPACE) {
      evacuation_candidates_.RemoveElement(page);
    } else {
      page->SetFlag(Page::RESCAN_ON_EVACUATION);
    }
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);

  INLINE(void RecordSlot(
      Object** anchor_slot, Object** slot, Object* object,
      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space);

  bool TryPromoteObject(HeapObject* object, int object_size);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool abort_incremental_marking() const { return abort_incremental_marking_; }

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes is set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes has been freed. If required_freed_bytes is zero, then
  // the whole given space is swept. It returns the size of the maximum
  // continuous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes);

  // Sweeps a given page concurrently with the sweeper threads. It returns the
  // size of the maximum continuous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);

  void EnsureSweepingCompleted();

  // If sweeper threads are not active, this method will return true. If
  // this is a latency issue, we should be smarter here. Otherwise, it will
  // return true if the sweeper threads are done processing the pages.
  bool IsSweepingCompleted();

  void RefillFreeList(PagedSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Mark the global table which maps weak objects to dependent code without
  // marking its contents.
  void MarkWeakObjectToCodeTable();

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  void EnsureMarkingDequeIsCommittedAndInitialize();

  void InitializeMarkingDeque();

  void UncommitMarkingDeque();

 private:
  class SweeperTask;

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector();

  bool MarkInvalidatedCode();
  bool WillBeDeoptimized(Code* code);
  void RemoveDeadInvalidatedCode();
  void ProcessInvalidatedCode(ObjectVisitor* visitor);

  void StartSweeperThreads();

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool reduce_memory_footprint_;

  bool abort_incremental_marking_;

  MarkingParity marking_parity_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool was_marked_incrementally_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  base::Semaphore pending_sweeper_jobs_semaphore_;

  bool evacuation_;

  SlotsBufferAllocator slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class RootMarkingVisitor;
  friend class MarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class CodeMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  void AfterMarking();

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers;
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  // Map transitions from a live map to a dead map must be killed.
  // We replace them with a null descriptor, with the same key.
  void ClearNonLiveReferences();
  void ClearNonLivePrototypeTransitions(Map* map);
  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
  void ClearMapTransitions(Map* map);
  bool ClearMapBackPointer(Map* map);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
                           int number_of_own_descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  void ClearDependentCode(DependentCode* dependent_code);
  void ClearNonLiveDependentCode(DependentCode* dependent_code);
  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                       int start, int end, int new_start);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();


  void ProcessAndClearWeakCells();
  void AbortWeakCells();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
                                            NewSpacePage* p);

  void EvacuateNewSpace();

  void EvacuateLiveObjectsFromPage(Page* p);

  void EvacuatePages();

  void EvacuateNewSpaceAndCandidates();

  void ReleaseEvacuationCandidates();

  // Moves the pages of the evacuation_candidates_ list to the end of their
  // corresponding space pages list.
  void MoveEvacuationCandidatesToEndOfPagesList();

  void SweepSpace(PagedSpace* space, SweeperType sweeper);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  void ParallelSweepSpaceComplete(PagedSpace* space);

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<Code*> invalidated_code_;

  SmartPointer<FreeList> free_list_old_data_space_;
  SmartPointer<FreeList> free_list_old_pointer_space_;

  friend class Heap;
};


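// Iterates over the mark-bit cells of a MemoryChunk. Each cell covers 32 mark
// bits, i.e. 32 pointer-size words of the chunk (see Advance(), which steps
// cell_base_ by 32 * kPointerSize).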
class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};


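// RAII scope that sets the collector's evacuation flag for its duration.
// A minimal usage sketch (illustrative only; in practice the scope is entered
// from within the collector around the evacuation phase):
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // ... evacuate new space and evacuation candidates ...
//   }  // The evacuation flag is cleared again here.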
class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};


const char* AllocationSpaceName(AllocationSpace space);
}
}  // namespace v8::internal

#endif  // V8_HEAP_MARK_COMPACT_H_