// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(COMPLETE_MARKING) {}


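// Common slow path of the incremental write barrier. Incremental marking
// maintains the invariant that a black (fully scanned) object never points
// to a white (unvisited) one, so when a white value is written into a black
// object, the value is greyed and pushed onto the marking deque. The return
// value tells the caller whether the written slot must additionally be
// recorded for incremental compaction; only black objects need this, since
// grey and white objects will be rescanned anyway.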
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned; we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

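  // The page's write barrier counter counts down from
  // kWriteBarrierCounterGranularity as record-write barriers fire. Once it
  // has dropped below half of the granularity, fold the consumed budget into
  // write_barriers_invoked_since_last_step_ and reset the counter; Step()
  // uses this value to pace marking against write-barrier activity.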
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(obj, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(value));
  }
}


void IncrementalMarking::RecordWrites(HeapObject* obj) {
  if (IsMarking()) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
        chunk->set_progress_bar(0);
      }
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
  }
}


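// Reverts a black object to grey and puts it back at the front of the
// marking deque so that it will be rescanned. Live-byte and scanned-byte
// accounting is rolled back accordingly. If more than twice the promoted
// heap size has been queued for rescanning, marking speed is raised to its
// maximum to force this marking phase to finish.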
void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
                                               MarkBit mark_bit) {
  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
  DCHECK(obj->Size() >= 2 * kPointerSize);
  DCHECK(IsMarking());
  Marking::BlackToGrey(mark_bit);
  int obj_size = obj->Size();
  MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
  bytes_scanned_ -= obj_size;
  int64_t old_bytes_rescanned = bytes_rescanned_;
  bytes_rescanned_ = old_bytes_rescanned + obj_size;
  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
      // If we have queued twice the heap size for rescanning then we are
      // going around in circles, scanning the same objects again and again
      // as the program mutates the heap faster than we can incrementally
      // trace it. In this case we switch to non-incremental marking in
      // order to finish off this marking phase.
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(
            heap()->isolate(),
            "Hurrying incremental marking because of lack of progress\n");
      }
      marking_speed_ = kMaxMarkingSpeed;
    }
  }

  heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


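// Marks an object grey without pushing it onto the marking deque. This is
// used for caches that are deliberately not traced during incremental
// marking; they are blackened in a separate pass when marking finishes.
// Greying a black object subtracts its size from the live-byte count, which
// blackening it later will add back.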
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until
      // it is fully scanned. Fall back to scanning it through to the end in
      // case this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


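// Helpers for toggling the incremental write barrier on a per-page basis.
// The POINTERS_TO_HERE_ARE_INTERESTING and POINTERS_FROM_HERE_ARE_INTERESTING
// flags are consulted by the write-barrier fast path to decide whether a
// store needs barrier work at all; while marking is active both flags are
// set on old-space pages so that every store is examined.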
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
  return CanBeActivated() &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  // Don't switch on for very small heaps.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddInlineAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


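// Walks the list of weak cells encountered during marking (threaded through
// WeakCell::next and terminated by Smi zero). Cells whose values are already
// marked are unlinked, since they will not need clearing; for those, the
// value slot is recorded in case it points into an evacuation candidate.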
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list; they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead; no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


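// retained_maps is a flat ArrayList of (WeakCell-of-Map, age) pairs, which
// is why the loop below advances in steps of two. An unmarked map is kept
// artificially alive until its age counts down to zero, so that maps of
// objects allocated shortly before a GC are not thrown away prematurely
// together with their transition trees.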
void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked; this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Update the age of the map if it changed.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  //    do not need processing during GC.
  MarkRoots();
  MarkObjectGroups();
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    // Note: the arguments must match the format string: round, then time.
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }
}


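// After a scavenge, entries in the marking deque may point at objects that
// the scavenger has moved. Rewrite the deque in place: follow forwarding
// pointers for new-space objects, drop one-word fillers, and compact the
// surviving entries between bottom and a new top.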
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform in-place array shifts.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


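// Drains the marking deque until roughly bytes_to_process bytes of object
// bodies have been visited (or the deque is empty). Bytes that remain
// unscanned in a partially visited large object are excluded from the
// budget via unscanned_bytes_of_large_object_.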
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;

  heap_->new_space()->RemoveInlineAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental;
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}


double IncrementalMarking::AdvanceIncrementalMarking(
    intptr_t step_size_in_bytes, double deadline_in_ms,
    IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  if (step_size_in_bytes == 0) {
    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
        static_cast<size_t>(
            heap()
                ->tracer()
                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
  }

  double remaining_time_in_ms = 0.0;
  do {
    Step(step_size_in_bytes, step_actions.completion_action,
         step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


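// Heuristically increases marking_speed_ when marking risks falling behind:
// periodically after kMarkingSpeedAccellerationInterval steps, when old
// generation headroom is nearly exhausted, when the old generation has grown
// substantially since marking started, or when promotion outpaces scanning.
// The increase is only applied once MARKING has actually started.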
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}


intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the
    // write barrier. It is possible for a tight non-allocating loop to run
    // a lot of write barriers before we get here and check them (marking
    // can only take place on allocation), so to reduce the lumpiness we
    // don't use the write barriers invoked since the last step directly to
    // determine the amount of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8