// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <deque>

#include "src/base/bits.h"
#include "src/heap/spaces.h"
#include "src/heap/store-buffer.h"

namespace v8 {
namespace internal {

// Callback function that returns whether an object is alive. The heap size
// of the object is returned in |size|. It optionally updates the offset
// to the first live object in the page (only used for old and map objects).
typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);

// Callback function to mark an object in a given heap.
typedef void (*MarkObjectFunction)(Heap* heap, HeapObject* object);

// Forward declarations.
class CodeFlusher;
class MarkCompactCollector;
class MarkingVisitor;
class RootMarkingVisitor;

class Marking : public AllStatic {
 public:
  INLINE(static MarkBit MarkBitFrom(Address addr)) {
    MemoryChunk* p = MemoryChunk::FromAddress(addr);
    return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr));
  }

  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
    return MarkBitFrom(reinterpret_cast<Address>(obj));
  }

  // Impossible markbits: 01
  static const char* kImpossibleBitPattern;
  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
    return !mark_bit.Get() && mark_bit.Next().Get();
  }

  // Black markbits: 11
  static const char* kBlackBitPattern;
  INLINE(static bool IsBlack(MarkBit mark_bit)) {
    return mark_bit.Get() && mark_bit.Next().Get();
  }

  // White markbits: 00 - this is required by the mark bit clearer.
  static const char* kWhiteBitPattern;
  INLINE(static bool IsWhite(MarkBit mark_bit)) {
    DCHECK(!IsImpossible(mark_bit));
    return !mark_bit.Get();
  }

  // Grey markbits: 10
  static const char* kGreyBitPattern;
  INLINE(static bool IsGrey(MarkBit mark_bit)) {
    return mark_bit.Get() && !mark_bit.Next().Get();
  }

  // IsBlackOrGrey assumes that the first bit is set for black or grey
  // objects.
  INLINE(static bool IsBlackOrGrey(MarkBit mark_bit)) { return mark_bit.Get(); }

  INLINE(static void MarkBlack(MarkBit mark_bit)) {
    mark_bit.Set();
    mark_bit.Next().Set();
  }

  INLINE(static void MarkWhite(MarkBit mark_bit)) {
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }

  INLINE(static void BlackToWhite(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void GreyToWhite(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Clear();
    markbit.Next().Clear();
  }

  INLINE(static void BlackToGrey(MarkBit markbit)) {
    DCHECK(IsBlack(markbit));
    markbit.Next().Clear();
  }

  INLINE(static void WhiteToGrey(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
  }

  INLINE(static void WhiteToBlack(MarkBit markbit)) {
    DCHECK(IsWhite(markbit));
    markbit.Set();
    markbit.Next().Set();
  }

  INLINE(static void GreyToBlack(MarkBit markbit)) {
    DCHECK(IsGrey(markbit));
    markbit.Next().Set();
  }

  INLINE(static void BlackToGrey(HeapObject* obj)) {
    BlackToGrey(MarkBitFrom(obj));
  }

  INLINE(static void AnyToGrey(MarkBit markbit)) {
    markbit.Set();
    markbit.Next().Clear();
  }
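
  // Illustrative sketch (an editorial aside, not part of the interface):
  // under the tri-color scheme above, an object starts out white, is greyed
  // when first discovered, and is blackened once all of its fields have been
  // visited, e.g.:
  //
  //   MarkBit bit = Marking::MarkBitFrom(object);
  //   if (Marking::IsWhite(bit)) Marking::WhiteToGrey(bit);  // discovered
  //   // ... visit the object's fields ...
  //   if (Marking::IsGrey(bit)) Marking::GreyToBlack(bit);   // fully scanned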

  static void TransferMark(Heap* heap, Address old_start, Address new_start);

#ifdef DEBUG
  enum ObjectColor {
    BLACK_OBJECT,
    WHITE_OBJECT,
    GREY_OBJECT,
    IMPOSSIBLE_COLOR
  };

  static const char* ColorName(ObjectColor color) {
    switch (color) {
      case BLACK_OBJECT:
        return "black";
      case WHITE_OBJECT:
        return "white";
      case GREY_OBJECT:
        return "grey";
      case IMPOSSIBLE_COLOR:
        return "impossible";
    }
    return "error";
  }

  static ObjectColor Color(HeapObject* obj) {
    return Color(Marking::MarkBitFrom(obj));
  }

  static ObjectColor Color(MarkBit mark_bit) {
    if (IsBlack(mark_bit)) return BLACK_OBJECT;
    if (IsWhite(mark_bit)) return WHITE_OBJECT;
    if (IsGrey(mark_bit)) return GREY_OBJECT;
    UNREACHABLE();
    return IMPOSSIBLE_COLOR;
  }
#endif

  // Returns true if the transferred color is black.
  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
    if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
      return true;
    MarkBit from_mark_bit = MarkBitFrom(from);
    MarkBit to_mark_bit = MarkBitFrom(to);
    DCHECK(Marking::IsWhite(to_mark_bit));
    if (from_mark_bit.Get()) {
      to_mark_bit.Set();
      if (from_mark_bit.Next().Get()) {
        to_mark_bit.Next().Set();
        return true;
      }
    }
    return false;
  }

 private:
  DISALLOW_IMPLICIT_CONSTRUCTORS(Marking);
};

// ----------------------------------------------------------------------------
// Marking deque for tracing live objects.
class MarkingDeque {
 public:
  MarkingDeque()
      : array_(NULL),
        top_(0),
        bottom_(0),
        mask_(0),
        overflowed_(false),
        in_use_(false) {}

  void Initialize(Address low, Address high);
  void Uninitialize(bool aborting = false);

  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }

  inline bool IsEmpty() { return top_ == bottom_; }

  bool overflowed() const { return overflowed_; }

  bool in_use() const { return in_use_; }

  void ClearOverflowed() { overflowed_ = false; }

  void SetOverflowed() { overflowed_ = true; }

  // Push the object on the marking stack if there is room, otherwise mark the
  // deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Push(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      array_[top_] = object;
      top_ = ((top_ + 1) & mask_);
      return true;
    }
  }

  INLINE(HeapObject* Pop()) {
    DCHECK(!IsEmpty());
    top_ = ((top_ - 1) & mask_);
    HeapObject* object = array_[top_];
    DCHECK(object->IsHeapObject());
    return object;
  }

  // Unshift the object into the marking stack if there is room, otherwise mark
  // the deque as overflowed and wait for a rescan of the heap.
  INLINE(bool Unshift(HeapObject* object)) {
    DCHECK(object->IsHeapObject());
    if (IsFull()) {
      SetOverflowed();
      return false;
    } else {
      bottom_ = ((bottom_ - 1) & mask_);
      array_[bottom_] = object;
      return true;
    }
  }

  HeapObject** array() { return array_; }
  int bottom() { return bottom_; }
  int top() { return top_; }
  int mask() { return mask_; }
  void set_top(int top) { top_ = top; }

 private:
  HeapObject** array_;
  // array_[(top_ - 1) & mask_] is the top element in the deque. The deque is
  // empty when top_ == bottom_. It is full when top_ + 1 == bottom_
  // (mod mask_ + 1).
  int top_;
  int bottom_;
  int mask_;
  bool overflowed_;
  bool in_use_;

  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
};
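
// Illustrative usage sketch (an editorial aside): the deque is a ring buffer
// over a committed memory region whose capacity is a power of two, so index
// arithmetic wraps via `& mask_`. Callers are expected to handle overflow by
// leaving the object grey and rescanning for it later, e.g.:
//
//   if (!deque->Push(object)) {
//     // Deque overflowed; the object stays grey on its page and is
//     // rediscovered by a later rescan (see RefillMarkingDeque()).
//   }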


// CodeFlusher collects candidates for code flushing during marking and
// processes those candidates after marking has completed in order to
// reset those functions referencing code objects that would otherwise
// be unreachable. Code objects can be referenced in two ways:
// - SharedFunctionInfo references unoptimized code.
// - JSFunction references either unoptimized or optimized code.
// We are not allowed to flush unoptimized code for functions that got
// optimized or inlined into optimized code, because we might bail out
// into the unoptimized code again during deoptimization.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(nullptr),
        shared_function_info_candidates_head_(nullptr) {}

  inline void AddCandidate(SharedFunctionInfo* shared_info);
  inline void AddCandidate(JSFunction* function);

  void EvictCandidate(SharedFunctionInfo* shared_info);
  void EvictCandidate(JSFunction* function);

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

  void IteratePointersToFromSpace(ObjectVisitor* v);

 private:
  void ProcessJSFunctionCandidates();
  void ProcessSharedFunctionInfoCandidates();

  static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
  static inline JSFunction* GetNextCandidate(JSFunction* candidate);
  static inline void SetNextCandidate(JSFunction* candidate,
                                      JSFunction* next_candidate);
  static inline void ClearNextCandidate(JSFunction* candidate,
                                        Object* undefined);

  static inline SharedFunctionInfo* GetNextCandidate(
      SharedFunctionInfo* candidate);
  static inline void SetNextCandidate(SharedFunctionInfo* candidate,
                                      SharedFunctionInfo* next_candidate);
  static inline void ClearNextCandidate(SharedFunctionInfo* candidate);

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
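
// Illustrative usage sketch (an editorial aside; the actual call sites live
// in the collector): candidates are accumulated while marking and processed
// once marking has completed:
//
//   CodeFlusher* flusher = collector->code_flusher();
//   flusher->AddCandidate(shared_info);  // during marking
//   flusher->ProcessCandidates();        // after marking completes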


// Defined in isolate.h.
class ThreadLocalTop;

class MarkBitCellIterator BASE_EMBEDDED {
 public:
  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
    cell_base_ = chunk_->area_start();
    cell_index_ = Bitmap::IndexToCell(
        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
    cells_ = chunk_->markbits()->cells();
  }

  inline bool Done() { return cell_index_ == last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                              chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  inline void Advance() {
    cell_index_++;
    cell_base_ += 32 * kPointerSize;
  }

  // Returns the next mark bit cell, or 0 if there is no next cell.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
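
// Illustrative iteration sketch (an editorial aside): each 32-bit mark-bit
// cell covers 32 * kPointerSize bytes of the chunk, starting at
// CurrentCellBase():
//
//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
//     MarkBit::CellType cell = *it.CurrentCell();
//     if (cell != 0) { /* some mark bit in this cell is set */ }
//   }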

// Grey objects can occur on black pages when black objects transition to
// grey, e.g. when calling RecordWrites on them.
enum LiveObjectIterationMode {
  kBlackObjects,
  kGreyObjects,
  kAllLiveObjects
};

template <LiveObjectIterationMode T>
class LiveObjectIterator BASE_EMBEDDED {
 public:
  explicit LiveObjectIterator(MemoryChunk* chunk)
      : chunk_(chunk),
        it_(chunk_),
        cell_base_(it_.CurrentCellBase()),
        current_cell_(*it_.CurrentCell()) {
    // Black pages cannot be iterated.
    DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
  }

  HeapObject* Next();

 private:
  MemoryChunk* chunk_;
  MarkBitCellIterator it_;
  Address cell_base_;
  MarkBit::CellType current_cell_;
};
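
// Illustrative usage sketch (an editorial aside, assuming Next() signals
// exhaustion by returning null): walk the live black objects of a page via
// the marking bitmap:
//
//   LiveObjectIterator<kBlackObjects> it(page);
//   HeapObject* object = nullptr;
//   while ((object = it.Next()) != nullptr) {
//     // ... process |object| ...
//   }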

// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
 public:
  class Evacuator;

  class Sweeper {
   public:
    class SweeperTask;

    enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
    enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
    enum FreeListRebuildingMode { REBUILD_FREE_LIST, IGNORE_FREE_LIST };
    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
    enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };

    typedef std::deque<Page*> SweepingList;
    typedef List<Page*> SweptList;

    template <SweepingMode sweeping_mode, SweepingParallelism parallelism,
              SkipListRebuildingMode skip_list_mode,
              FreeListRebuildingMode free_list_mode,
              FreeSpaceTreatmentMode free_space_mode>
    static int RawSweep(PagedSpace* space, Page* p, ObjectVisitor* v);

    explicit Sweeper(Heap* heap)
        : heap_(heap),
          pending_sweeper_tasks_semaphore_(0),
          sweeping_in_progress_(false),
          late_pages_(false),
          num_sweeping_tasks_(0) {}

    bool sweeping_in_progress() { return sweeping_in_progress_; }
    bool contains_late_pages() { return late_pages_; }

    void AddPage(AllocationSpace space, Page* page);
    void AddLatePage(AllocationSpace space, Page* page);

    int ParallelSweepSpace(AllocationSpace identity, int required_freed_bytes,
                           int max_pages = 0);
    int ParallelSweepPage(Page* page, AllocationSpace identity);

    void StartSweeping();
    void StartSweepingHelper(AllocationSpace space_to_start);
    void EnsureCompleted();
    void EnsureNewSpaceCompleted();
    bool IsSweepingCompleted();
    void SweepOrWaitUntilSweepingCompleted(Page* page);

    void AddSweptPageSafe(PagedSpace* space, Page* page);
    Page* GetSweptPageSafe(PagedSpace* space);

   private:
    static const int kAllocationSpaces = LAST_PAGED_SPACE + 1;

    template <typename Callback>
    void ForAllSweepingSpaces(Callback callback) {
      for (int i = 0; i < kAllocationSpaces; i++) {
        callback(static_cast<AllocationSpace>(i));
      }
    }

    Page* GetSweepingPageSafe(AllocationSpace space);
    void AddSweepingPageSafe(AllocationSpace space, Page* page);

    void PrepareToBeSweptPage(AllocationSpace space, Page* page);

    Heap* heap_;
    base::Semaphore pending_sweeper_tasks_semaphore_;
    base::Mutex mutex_;
    SweptList swept_list_[kAllocationSpaces];
    SweepingList sweeping_list_[kAllocationSpaces];
    bool sweeping_in_progress_;
    bool late_pages_;
    base::AtomicNumber<intptr_t> num_sweeping_tasks_;
  };

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  static void Initialize();

  void SetUp();

  void TearDown();

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Performs a global garbage collection.
  void CollectGarbage();

  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };

  bool StartCompaction(CompactionMode mode);

  void AbortCompaction();

#ifdef DEBUG
  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  // Determine type of object and emit deletion log event.
  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);

  // Distinguishable invalid map encodings (for single word and multiple words)
  // that indicate free regions.
  static const uint32_t kSingleFreeEncoding = 0;
  static const uint32_t kMultiFreeEncoding = 1;

  static inline bool IsMarked(Object* obj);
  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate() const;

  CodeFlusher* code_flusher() { return code_flusher_; }
  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }

#ifdef VERIFY_HEAP
  void VerifyValidStoreAndSlotsBufferEntries();
  void VerifyMarkbitsAreClean();
  static void VerifyMarkbitsAreClean(PagedSpace* space);
  static void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyWeakEmbeddedObjectsInCode();
  void VerifyOmittedMapChecks();
#endif

  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
    return Page::FromAddress(reinterpret_cast<Address>(host))
        ->ShouldSkipEvacuationSlotRecording();
  }

  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
    return Page::FromAddress(reinterpret_cast<Address>(obj))
        ->IsEvacuationCandidate();
  }

  void RecordRelocSlot(Code* host, RelocInfo* rinfo, Object* target);
  void RecordCodeEntrySlot(HeapObject* host, Address slot, Code* target);
  void RecordCodeTargetPatch(Address pc, Code* target);
  INLINE(void RecordSlot(HeapObject* object, Object** slot, Object* target));
  INLINE(void ForceRecordSlot(HeapObject* object, Object** slot,
                              Object* target));

  void UpdateSlots(SlotsBuffer* buffer);
  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);

  void InvalidateCode(Code* code);

  void ClearMarkbits();

  bool is_compacting() const { return compacting_; }

  MarkingParity marking_parity() { return marking_parity_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  void EnsureSweepingCompleted();

  // Help out in sweeping the corresponding space and refill memory that has
  // been regained.
  //
  // Note: Thread-safe.
  void SweepAndRefill(CompactionSpace* space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() { return sweeper().sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  // Special case for processing weak references in a full collection. We need
  // to artificially keep AllocationSites alive for a time.
  void MarkAllocationSite(AllocationSite* site);

  // Mark objects in implicit reference groups if their parent object
  // is marked.
  void MarkImplicitRefGroups(MarkObjectFunction mark_object);

  MarkingDeque* marking_deque() { return &marking_deque_; }

  static const size_t kMaxMarkingDequeSize = 4 * MB;
  static const size_t kMinMarkingDequeSize = 256 * KB;

  void EnsureMarkingDequeIsCommittedAndInitialize(size_t max_size) {
    if (!marking_deque_.in_use()) {
      EnsureMarkingDequeIsCommitted(max_size);
      InitializeMarkingDeque();
    }
  }

  void EnsureMarkingDequeIsCommitted(size_t max_size);
  void EnsureMarkingDequeIsReserved();

  void InitializeMarkingDeque();

  // The following two methods may only be called after marking, when the
  // whole transitive closure is known. They must be called before sweeping,
  // while mark bits are still intact.
  bool IsSlotInBlackObject(MemoryChunk* p, Address slot);
  HeapObject* FindBlackObjectBySlotSlow(Address slot);

  // Removes all the slots in the slot buffers that are within the given
  // address range.
  void RemoveObjectSlots(Address start_slot, Address end_slot);

  Sweeper& sweeper() { return sweeper_; }

  void RegisterWrappersWithEmbedderHeapTracer();

  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);

  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }

  bool UsingEmbedderHeapTracer() { return embedder_heap_tracer(); }

  void TracePossibleWrapper(JSObject* js_object);

  void RegisterExternallyReferencedObject(Object** object);

 private:
  class EvacuateNewSpacePageVisitor;
  class EvacuateNewSpaceVisitor;
  class EvacuateOldSpaceVisitor;
  class EvacuateRecordOnlyVisitor;
  class EvacuateVisitorBase;
  class HeapObjectVisitor;

  explicit MarkCompactCollector(Heap* heap);

  bool WillBeDeoptimized(Code* code);
  void ClearInvalidRememberedSetSlots();

  void ComputeEvacuationHeuristics(int area_size,
                                   int* target_fragmentation_percent,
                                   int* max_evacuated_bytes);

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // -----------------------------------------------------------------------
  // Phase 1: Marking live objects.
  //
  //  Before: The heap has been prepared for garbage collection by
  //          MarkCompactCollector::Prepare() and is otherwise in its
  //          normal state.
  //
  //   After: Live objects are marked and non-live objects are unmarked.

  friend class CodeMarkingVisitor;
  friend class IncrementalMarkingMarkingVisitor;
  friend class MarkCompactMarkingVisitor;
  friend class MarkingVisitor;
  friend class RecordMigratedSlotVisitor;
  friend class RootMarkingVisitor;
  friend class SharedFunctionInfoMarkingVisitor;

  // Mark code objects that are active on the stack to prevent them
  // from being flushed.
  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);

  void PrepareForCodeFlushing();

  // Marking operations for objects reachable from roots.
  void MarkLiveObjects();

  // Pushes a black object onto the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have not yet been counted.
  INLINE(void PushBlack(HeapObject* obj));

  // Unshifts a black object into the marking stack and accounts for live
  // bytes. Note that this assumes live bytes have already been counted.
  INLINE(void UnshiftBlack(HeapObject* obj));

  // Marks the object black and pushes it on the marking stack.
  // This is for non-incremental marking only.
  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));

  // Marks the object black assuming that it is not yet marked.
  // This is for non-incremental marking only.
  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootMarkingVisitor* visitor);

  // Mark the string table specially. References to internalized strings from
  // the string table are weak.
  void MarkStringTable(RootMarkingVisitor* visitor);

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap.
  void ProcessMarkingDeque();

  // Mark objects reachable (transitively) from objects in the marking stack
  // or overflowed in the heap. This respects references only considered in
  // the final atomic marking pause including the following:
  // - Processing of objects reachable through Harmony WeakMaps.
  // - Objects reachable due to host application logic like object groups,
  //   implicit reference groups, or embedder heap tracing.
  void ProcessEphemeralMarking(ObjectVisitor* visitor,
                               bool only_process_harmony_weak_collections);

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the maps in the code as strong pointers,
  // otherwise a map can die and deoptimize the code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Collects a list of dependent code from maps embedded in optimized code.
  DependentCode* DependentCodeListFromNonLiveMaps();

  // Mark objects reachable (transitively) from objects in the marking
  // stack. This function empties the marking stack, but may leave
  // overflowed objects in the heap, in which case the marking stack's
  // overflow flag will be set.
  void EmptyMarkingDeque();

  // Refill the marking stack with overflowed objects from the heap. This
  // function either leaves the marking stack full or clears the overflow
  // flag on the marking stack.
  void RefillMarkingDeque();

  // Helper methods for refilling the marking stack by discovering grey objects
  // on various pages of the heap. Used by {RefillMarkingDeque} only.
  template <class T>
  void DiscoverGreyObjectsWithIterator(T* it);
  void DiscoverGreyObjectsOnPage(MemoryChunk* p);
  void DiscoverGreyObjectsInSpace(PagedSpace* space);
  void DiscoverGreyObjectsInNewSpace();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Object** p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences();
  void MarkDependentCodeForDeoptimization(DependentCode* list);
  // Find non-live targets of simple transitions in the given list. Clear
  // transitions to non-live targets and, if needed, trim descriptor arrays.
  void ClearSimpleMapTransitions(Object* non_live_map_list);
  void ClearSimpleMapTransition(Map* map, Map* dead_transition);
  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is
  // non-live.
  void ClearFullMapTransitions();
  bool CompactTransitionArray(Map* map, TransitionArray* transitions,
                              DescriptorArray* descriptors);
  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors);
  void TrimEnumCache(Map* map, DescriptorArray* descriptors);

  // Mark all values associated with reachable keys in weak collections
  // encountered so far. This might push new objects or even new weak maps
  // onto the marking stack.
  void ProcessWeakCollections();

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // We have to remove all encountered weak maps from the list of weak
  // collections when incremental marking is aborted.
  void AbortWeakCollections();

  void ClearWeakCells(Object** non_live_map_list,
                      DependentCode** dependent_code_list);
  void AbortWeakCells();

  void AbortTransitionArrays();

  // -----------------------------------------------------------------------
  // Phase 2: Sweeping to clear mark bits and free non-live objects for
  // a non-compacting collection.
  //
  //  Before: Live objects are marked and non-live objects are unmarked.
  //
  //   After: Live objects are unmarked, non-live regions have been added to
  //          their space's free list. Active eden semispace is compacted by
  //          evacuation.
  //

  // If we are not compacting the heap, we simply sweep the spaces except
  // for the large object space, clearing mark bits and adding unmarked
  // regions to each space's free list.
  void SweepSpaces();

  void EvacuateNewSpacePrologue();

  void EvacuatePagesInParallel();

  // The number of parallel compaction tasks, including the main thread.
  int NumberOfParallelCompactionTasks(int pages, intptr_t live_bytes);

  void EvacuateNewSpaceAndCandidates();

  void UpdatePointersAfterEvacuation();

  // Iterates through all live objects on a page using marking information.
  // Returns whether all objects have successfully been visited.
  template <class Visitor>
  bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
                        IterationMode mode);

  void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

  void RecomputeLiveBytes(MemoryChunk* page);

  void ReleaseEvacuationCandidates();

  // Starts sweeping of a space by contributing on the main thread and setting
  // up other pages for sweeping.
  void StartSweepSpace(PagedSpace* space);

#ifdef DEBUG
  friend class MarkObjectVisitor;
  static void VisitObject(HeapObject* obj);

  friend class UnmarkObjectVisitor;
  static void UnmarkObject(HeapObject* obj);
#endif

  Heap* heap_;

  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  MarkingParity marking_parity_;

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  base::VirtualMemory* marking_deque_memory_;
  size_t marking_deque_memory_committed_;
  MarkingDeque marking_deque_;
  std::vector<std::pair<void*, void*>> wrappers_to_trace_;

  CodeFlusher* code_flusher_;

  EmbedderHeapTracer* embedder_heap_tracer_;

  List<Page*> evacuation_candidates_;
  List<Page*> newspace_evacuation_candidates_;

  Sweeper sweeper_;

  friend class Heap;
  friend class StoreBuffer;
};


class EvacuationScope BASE_EMBEDDED {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
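
// Illustrative usage sketch (an editorial aside): EvacuationScope is an RAII
// helper that flags evacuation for its dynamic extent:
//
//   {
//     EvacuationScope evacuation_scope(collector);
//     // collector->evacuation() is true while evacuating.
//   }
//   // collector->evacuation() is false again here.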


const char* AllocationSpaceName(AllocationSpace space);
}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_