// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {
// Callback function that returns whether an object is alive. The heap size
// of the object is returned in |size|. It optionally updates the offset
// to the first live object on the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;
class SlotsBuffer;
class SlotsBufferAllocator;


class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }
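
  // Summary of the two-bit encoding used above (first bit, Next() bit):
  //   white:      00
  //   grey:       10
  //   black:      11
  //   impossible: 01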

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }
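
  // Typical transition sequence during marking (illustrative sketch): an
  // object is greyed when it is discovered and pushed onto the marking deque,
  // and blackened once it has been popped and its body visited:
  //
  //   MarkBit bit = Marking::MarkBitFrom(object);
  //   Marking::WhiteToGrey(bit);  // discovered, pushed on the marking deque
  //   ...
  //   Marking::GreyToBlack(bit);  // popped, body visited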

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
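  // Worked example (illustrative): with mask_ == 7 the backing array has
  // eight slots but holds at most seven objects; one slot is sacrificed so
  // that top_ == bottom_ unambiguously means "empty". Starting from
  // bottom_ == 0 and top_ == 6, Push() stores at array_[6] and advances top_
  // to (6 + 1) & 7 == 7; IsFull() is then true since (7 + 1) & 7 == 0 ==
  // bottom_, so the next Push() sets the overflowed flag and returns false.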
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
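
// Illustrative two-phase usage of CodeFlusher (a sketch; the real call sites
// live inside the collector):
//
//   flusher->AddCandidate(shared_info);  // during marking
//   ...
//   flusher->ProcessCandidates();        // after marking has completed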


// Defined in isolate.h.
class ThreadLocalTop;


// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* object, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                     AllocationSpace to_old_space,
                     SlotsBuffer** evacuation_slots_buffer,
                     LocalStoreBuffer* local_store_buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Concurrent and parallel sweeping support. If required_freed_bytes was set
  // to a value larger than 0, then sweeping returns after a block of at least
  // required_freed_bytes was freed. If required_freed_bytes was set to zero
  // then the whole given space is swept. It returns the size of the maximum
  // continuous freed memory chunk.
  int SweepInParallel(PagedSpace* space, int required_freed_bytes,
                      int max_pages = 0);
  // Sweeps a given page concurrently with the sweeper threads. It returns the
  // size of the maximum continuous freed memory chunk.
  int SweepInParallel(Page* page, PagedSpace* space);
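
  // Illustrative use (a sketch; |space| is assumed to be a PagedSpace* that
  // currently has unswept pages):
  //
  //   // Sweep until at least one 4 KB block has been freed; passing 0
  //   // instead sweeps the whole space.
  //   int max_freed = collector->SweepInParallel(space, 4 * KB);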

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  void SweepOrWaitUntilSweepingCompleted(Page* page);

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // If sweeper threads are not active, this method returns true immediately
  // (if this shortcut is a latency issue, we should be smarter here).
  // Otherwise, it returns true once the sweeper threads are done processing
  // the pages.
  bool IsSweepingCompleted();

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeping_in_progress_; }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following four methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // when mark bits are still intact.
  bool IsSlotInBlackObject(Page* p, Address slot, HeapObject** out_object);
  bool IsSlotInBlackObjectSlow(Page* p, Address slot);
  bool IsSlotInLiveObject(Address slot);
  void VerifyIsSlotInLiveObject(Address slot, HeapObject* object);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  //
  // Free lists filled by sweeper and consumed by corresponding spaces
  // (including compaction spaces).
  //
  base::SmartPointer<FreeList>& free_list_old_space() {
    return free_list_old_space_;
  }
  base::SmartPointer<FreeList>& free_list_code_space() {
    return free_list_code_space_;
  }
  base::SmartPointer<FreeList>& free_list_map_space() {
    return free_list_map_space_;
  }

 private:
  class CompactionTask;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateVisitorBase;
  class Evacuator;
  class HeapObjectVisitor;
  class SweeperTask;

  typedef std::vector<Page*> SweepingList;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void EvictPopularEvacuationCandidate(Page* page);
  void ClearInvalidStoreAndSlotsBufferEntries();

  void StartSweeperThreads();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  SlotsBufferAllocator* slots_buffer_allocator_;

  SlotsBuffer* migration_slots_buffer_;

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  // Before: The heap has been prepared for garbage collection by
  //         MarkCompactCollector::Prepare() and is otherwise in its
  //         normal state.
  //
  // After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live bytes.
  // Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references that are only
  // considered in the final atomic marking pause, including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups
  //   or implicit reference groups.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and, if needed, trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is
  // non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps onto
  // the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  // Before: Live objects are marked and non-live objects are unmarked.
  //
  // After: Live objects are unmarked, non-live regions have been added to
  //        their space's free list. Active eden semispace is compacted by
  //        evacuation.
  //

  inline SweepingList& sweeping_list(Space* space);

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();
  void EvacuateNewSpaceEpilogue();

  void AddEvacuationSlotsBufferSynchronized(
      SlotsBuffer* evacuation_slots_buffer);

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void StartParallelCompaction(Evacuator** evacuators, int len);
  void WaitUntilCompactionCompleted(Evacuator** evacuators, int len);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void SweepAbortedPages();

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

  // Finalizes the parallel sweeping phase. Marks all the pages that were
  // swept in parallel.
  void ParallelSweepSpacesComplete();

  // Updates store buffer and slot buffer for a pointer in a migrating object.
  void RecordMigratedSlot(Object* value, Address slot,
                          SlotsBuffer** evacuation_slots_buffer,
                          LocalStoreBuffer* local_store_buffer);

  // Adds the code entry slot to the slots buffer.
  void RecordMigratedCodeEntrySlot(Address code_entry, Address code_entry_slot,
                                   SlotsBuffer** evacuation_slots_buffer);

  // Adds the slot of a moved code object.
  void RecordMigratedCodeObjectSlot(Address code_object,
                                    SlotsBuffer** evacuation_slots_buffer);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;
  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  CodeFlusher* code_flusher_;
  bool have_code_to_deoptimize_;

  List<Page*> evacuation_candidates_;
  List<NewSpacePage*> newspace_evacuation_candidates_;

  // The evacuation_slots_buffers_ are used by the compaction threads.
  // When a compaction task finishes, it uses
  // AddEvacuationSlotsBufferSynchronized to add its slots buffer to the
  // evacuation_slots_buffers_ list while holding the
  // evacuation_slots_buffers_mutex_ lock.
  base::Mutex evacuation_slots_buffers_mutex_;
  List<SlotsBuffer*> evacuation_slots_buffers_;

  base::SmartPointer<FreeList> free_list_old_space_;
  base::SmartPointer<FreeList> free_list_code_space_;
  base::SmartPointer<FreeList> free_list_map_space_;

  SweepingList sweeping_list_old_space_;
  SweepingList sweeping_list_code_space_;
  SweepingList sweeping_list_map_space_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  // True if concurrent or parallel sweeping is currently in progress.
  bool sweeping_in_progress_;

  // True if parallel compaction is currently in progress.
  bool compaction_in_progress_;

  // Semaphore used to synchronize sweeper tasks.
  base::Semaphore pending_sweeper_tasks_semaphore_;

  // Semaphore used to synchronize compaction tasks.
  base::Semaphore pending_compaction_tasks_semaphore_;

  friend class Heap;
  friend class StoreBuffer;
};


class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell, or 0 if there is none.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
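
// Illustrative iteration over the mark bit cells of a chunk (a sketch,
// assuming a valid MemoryChunk* |chunk|); each cell covers 32 mark bits,
// i.e. 32 * kPointerSize bytes of the chunk:
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // ... scan the 32 mark bits in *cell, relative to |base| ...
//   }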

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {}

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
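
// Illustrative use (a sketch, assuming Next() returns NULL once all live
// objects on the chunk have been visited):
//
//   LiveObjectIterator<kBlackObjects> it(chunk);
//   HeapObject* object = NULL;
//   while ((object = it.Next()) != NULL) {
//     // ... process the black object ...
//   }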


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
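
// Illustrative RAII use: the evacuation flag is set for the dynamic extent of
// the scope and reset on exit, even on early return:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true here.
//   }  // flag reset by ~EvacuationScope()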


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_