// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(NONE) {}

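// Write barrier slow path shared by the RecordWrite* variants below: if a
// black object now points to a white value, the value is greyed and pushed
// onto the marking deque, restoring the invariant that black objects never
// point to white ones. Returns true when the caller must also record the
// slot, i.e. when we are compacting and the host object is already black.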
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


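// Write barrier entry point used by generated code. The per-chunk counter
// is (presumably) counted down by the generated barrier code; once it falls
// below half the granularity, the consumed amount is credited to
// write_barriers_invoked_since_last_step_, an input to the marking speed
// heuristics, and the counter is reset to the full granularity.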
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


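// Marks the object grey without pushing it onto the marking deque, so its
// body will not be scanned. A black object is demoted to grey and its live
// bytes are subtracted first, so that they are not counted twice when the
// object is blackened again later.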
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


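// Transitions a white or grey object to black and updates the live byte
// count of its chunk. Already-black objects are left untouched so that
// their size is not accounted twice.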
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until
      // it is fully scanned. Fall back to scanning it through to the end in
      // case this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }


  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
  }
}

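// Root visitor that greys every strong root and pushes it onto the marking
// deque; non-heap-object roots (Smis) are skipped.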
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


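// The two page flag setters below control which pages the write barrier
// considers interesting. While marking, pointers in both directions matter
// on every page; with marking off, only the old-to-new pointers tracked by
// the store buffer remain interesting (POINTERS_FROM_HERE on old-space
// pages, POINTERS_TO_HERE on new-space pages).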
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    Page* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Don't switch on for very small heaps.
  return CanBeActivated() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we
    // don't need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

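// Once black allocation is on, objects allocated while marking are treated
// as live immediately. Emptying the current allocation info and resetting
// the free list forces subsequent old-space allocations onto fresh pages,
// presumably so that whole pages can be treated as black.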
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  OldSpace* old_space = heap()->old_space();
  old_space->EmptyAllocationInfo();
  old_space->free_list()->Reset();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!heap_->UsingEmbedderHeapTracer());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


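// Walks the list of weak cells encountered so far and unlinks those whose
// values are already marked, recording the value slot in case it points
// into an evacuation candidate. Only cells with unmarked values stay on
// the list, leaving less weak-cell clearing work for the atomic pause.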
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells, they
  // do not need processing during GC.
  MarkRoots();
  if (!heap_->UsingEmbedderHeapTracer()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}


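// A scavenge moves new-space objects, so deque entries pointing into
// from-space must be updated to the forwarding addresses. Entries without
// a forwarding address (dead or left-trimmed objects) and one-word fillers
// are dropped; the deque is compacted in place between bottom and top.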
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left-trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform in-place array shifts.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


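// Pops and visits grey objects until the deque is empty or the byte budget
// is exhausted. Returns the number of bytes actually processed; for large
// objects scanned incrementally via the progress bar, the unscanned
// remainder is subtracted so the step is not charged for deferred work.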
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one- and two-word fillers. Incremental markbit patterns
    // are correct only for objects that occupy at least two words.
    // Moreover, slot filtering for left-trimmed arrays works only when
    // the distance between the old array start and the new array start
    // is greater than two if both starts are marked.
    Map* map = obj->map();
    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
      continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one-word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

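// Repeatedly performs marking steps of an estimated size until a step makes
// no progress, less than two step durations remain before the deadline,
// marking completes, or the deque runs empty. Returns the time remaining
// until deadline_in_ms.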
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
      heap()
          ->tracer()
          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
  double remaining_time_in_ms = 0.0;
  intptr_t bytes_processed = 0;

  do {
    bytes_processed =
        Step(step_size_in_bytes, step_actions.completion_action,
             step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (bytes_processed > 0 &&
           remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1040 | |
| 1041 | |
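// Reacts to an old-space allocation of |allocated| bytes: starts incremental
// marking if it is profitable to do so even without an idle notification,
// otherwise contributes a marking step proportional to the allocation.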
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

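// Heuristically increases marking_speed_ so the marker keeps up with the
// mutator. Marking is sped up periodically, when old-space headroom gets
// low, when the heap grows substantially during marking, or when promotion
// outpaces scanning.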
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

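// While the collector is still sweeping, a marking step first tries to
// finish sweeping; marking proper only starts once sweeping has completed.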
void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
       !FLAG_concurrent_sweeping)) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    bytes_scanned_ = 0;
    StartMarking();
  }
}

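// Performs one chunk of incremental work and returns the number of bytes of
// marking actually processed. Returns 0 when marking is inactive or when the
// step is skipped (thresholds not reached, or a recent idle notification).
//
// Illustrative call (hypothetical; real callers include
// AdvanceIncrementalMarking above):
//   marking->Step(bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD,
//                 IncrementalMarking::DO_NOT_FORCE_MARKING,
//                 IncrementalMarking::DO_NOT_FORCE_COMPLETION);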
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot
    // of write barriers before we get here and check them (marking can only
    // take place on allocation), so to reduce the lumpiness we don't use the
    // write barriers invoked since the last step directly to determine the
    // amount of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    // TODO(hpayer): Do not account for sweeping finalization while marking.
    if (state_ == SWEEPING) {
      FinalizeSweeping();
    }

    if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}

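// Snapshots the old-generation state at the start of an incremental cycle
// and resets the counters that the SpeedUp() heuristics are based on.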
void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}

int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

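// The idle marking delay counter throttles completion when it is not forced:
// each step that drains the deque without being allowed to complete bumps
// the counter, and completion proceeds once it exceeds
// kMaxIdleMarkingDelayCounter.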
bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}

void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}

void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8