// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(NONE) {}

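// Note: BaseRecordWrite below is the shared slow path of the incremental
// write barrier. When a black object is written to and the stored value is
// still white, the value is turned grey and pushed onto the marking deque,
// preserving the invariant that black objects never point to white ones.
// The boolean result tells callers whether the written slot additionally has
// to be recorded for the compaction phase; only writes into black objects
// need that, since black objects will not be rescanned.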
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


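// Note on RecordWriteFromCode below: the per-chunk write_barrier_counter is
// used to batch the bookkeeping of barrier invocations. Once the counter has
// dropped below half of its granularity window, the consumed part of the
// window is credited to write_barriers_invoked_since_last_step_ in one go and
// the counter is reset. Step() later uses this count (together with the bytes
// allocated) to size the next chunk of marking work. The counter itself is
// maintained by the generated write-barrier code and is not decremented in
// this file.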
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


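// The incremental marking visitor below deviates from the regular static
// marking visitor in two ways: large FixedArrays on LO_SPACE pages are
// scanned in kProgressBarScanningChunk-sized slices, with the page's progress
// bar remembering how far scanning got and the array being re-pushed onto the
// deque until it is fully scanned, and native contexts skip the normalized
// map cache, which is marked separately once marking finishes.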
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
    if (FLAG_track_gc_object_stats) {
      IncrementalMarkingObjectStatsVisitor::Initialize(&table_);
    }
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark the cache black with a separate pass when we finish
    // marking. Note that GC can happen when the context is not fully
    // initialized, so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(map->GetIsolate())) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
  }
}

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


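// The helpers below toggle the per-page flags that the write-barrier fast
// paths check. Roughly, POINTERS_TO_HERE_ARE_INTERESTING means incoming
// pointers to this page may need to be recorded, and
// POINTERS_FROM_HERE_ARE_INTERESTING means writes originating from this page
// have to go through the barrier while incremental marking is active.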
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, false, false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Don't switch on for very small heaps.
  return CanBeActivated() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  Isolate* isolate = heap->isolate();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(isolate, k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


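// Start() only switches the machinery on: if sweeping from the previous cycle
// is still running, the marker waits in the SWEEPING state and StartMarking()
// is called later from FinalizeSweeping(); otherwise marking starts
// immediately. In both cases the allocation observer is registered on new
// space so that allocation progress can drive further steps, and the
// incremental marking job is kicked off.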
void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  if (heap_->UsingEmbedderHeapTracer()) {
    heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
  }

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

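// Black allocation: once it is started, new old-space allocations are treated
// as live for the current marking cycle (they land on pages flagged as
// BLACK_PAGE), so the marker does not have to discover them. Starting it
// below seals the current linear allocation area and resets the old-space
// free list; see also the Page::BLACK_PAGE check in
// UpdateMarkingDequeAfterScavenge().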
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  OldSpace* old_space = heap()->old_space();
  old_space->EmptyAllocationInfo();
  old_space->free_list()->Reset();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!heap_->UsingEmbedderHeapTracer());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


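// ProcessWeakCells walks the singly-linked list of weak cells encountered by
// the marker so far (heap()->encountered_weak_cells()). Cells whose values
// are already marked are unlinked here because they will not need clearing in
// the atomic pause; cells whose values are still white stay on the list so
// the final pause can decide their fate.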
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


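// Map retention: ShouldRetainMap() and RetainMaps() implement an aging scheme
// for the maps tracked in heap()->retained_maps(). A map that is still white
// during finalization is kept alive artificially for up to
// FLAG_retain_maps_for_n_gc cycles (its age is stored next to it in the
// array), unless memory pressure or an aborting GC disables retention.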
bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  //    do not need processing during GC.
  MarkRoots();
  if (!heap_->UsingEmbedderHeapTracer()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}


void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


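// The two ProcessMarkingDeque overloads below are the core marking loops. The
// bounded version drains the deque until roughly bytes_to_process bytes of
// object bodies have been visited; large objects may be scanned only
// partially, which unscanned_bytes_of_large_object_ accounts for. The
// unbounded version, used by Hurry(), drains the deque completely.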
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one and two word fillers. Incremental markbit patterns
    // are correct only for objects that occupy at least two words.
    // Moreover, slots filtering for left-trimmed arrays works only when
    // the distance between the old array start and the new array start
    // is greater than two if both starts are marked.
    Map* map = obj->map();
    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
      continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined(heap_->isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined(heap_->isolate())) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

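// AdvanceIncrementalMarking repeatedly performs steps of an estimated size
// (derived from the measured mark-compact speed) until the deadline is less
// than two expected step durations away, a step makes no progress, marking is
// complete, or the marking deque runs empty. It returns the time still left
// until the deadline.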
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
      heap()
          ->tracer()
          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
  double remaining_time_in_ms = 0.0;
  intptr_t bytes_processed = 0;

  do {
    bytes_processed =
        Step(step_size_in_bytes, step_actions.completion_action,
             step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (bytes_processed > 0 &&
           remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed at which we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
       !FLAG_concurrent_sweeping)) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    bytes_scanned_ = 0;
    StartMarking();
  }
}

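// Step() is the central driver of incremental marking; OldSpaceStep,
// AdvanceIncrementalMarking and (presumably) the new-space allocation
// observer registered in Start() all funnel into it. Unless marking is
// forced, it ignores small allocation deltas and backs off after recent idle
// notifications; the amount of work is then sized as
// marking_speed_ * max(bytes allocated, write barriers invoked) since the
// last step. In SWEEPING state it only tries to finalize sweeping; once
// MARKING, it processes the deque and, when the deque is empty, requests
// finalization or completion according to the given CompletionAction.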
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since the last step directly to determine the amount
    // of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    // TODO(hpayer): Do not account for sweeping finalization while marking.
    if (state_ == SWEEPING) {
      FinalizeSweeping();
    }

    if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8