// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(COMPLETE_MARKING) {}

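// Shared slow path of the incremental write barrier: when a black object is
// found pointing to a white value, the value is greyed and pushed onto the
// marking deque so it will be scanned. Returns true if the caller still needs
// to record the slot for the compacting collector.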
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


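// Write barrier entry point used by generated code. Besides forwarding to
// RecordWrite, it folds the page's write barrier counter into
// write_barriers_invoked_since_last_step_ once the counter drops below half
// of its granularity, so that barrier activity can drive the step budget.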
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(obj, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(rinfo, value);
  }
}


void IncrementalMarking::RecordWrites(HeapObject* obj) {
  if (IsMarking()) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
        chunk->set_progress_bar(0);
      }
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
  }
}


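// Demotes an already-black object back to grey and puts it at the front of
// the marking deque so that it is rescanned. If the total number of rescanned
// bytes exceeds twice the promoted space size, the marking speed is raised to
// its maximum to force this phase to finish.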
void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
                                               MarkBit mark_bit) {
  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
  DCHECK(obj->Size() >= 2 * kPointerSize);
  DCHECK(IsMarking());
  Marking::BlackToGrey(mark_bit);
  int obj_size = obj->Size();
  MemoryChunk::IncrementLiveBytesFromGC(obj, -obj_size);
  bytes_scanned_ -= obj_size;
  int64_t old_bytes_rescanned = bytes_rescanned_;
  bytes_rescanned_ = old_bytes_rescanned + obj_size;
  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
      // If we have queued twice the heap size for rescanning then we are
      // going around in circles, scanning the same objects again and again
      // as the program mutates the heap faster than we can incrementally
      // trace it. In this case we switch to non-incremental marking in
      // order to finish off this marking phase.
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(
            heap()->isolate(),
            "Hurrying incremental marking because of lack of progress\n");
      }
      marking_speed_ = kMaxMarkingSpeed;
    }
  }

  heap_->mark_compact_collector()->marking_deque()->Unshift(obj);
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


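// Marking visitor used while incremental marking is active: it records slots
// for the compacting collector, greys the objects it encounters, and scans
// large fixed arrays in chunks, tracking progress via the page's progress bar.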
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


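// The helpers below toggle the per-page POINTERS_*_ARE_INTERESTING flags that
// the write barrier consults, which is how the incremental barrier is
// activated and deactivated for old, new, and large object pages.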
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Don't switch on for very small heaps.
  return CanBeActivated() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


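// Starts incremental marking. If the concurrent sweeper is still running we
// only enter the SWEEPING state and defer the actual marking start; otherwise
// marking begins right away. In both cases an allocation observer is
// registered on new space so that allocation keeps driving marking steps.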
void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


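// Switches to the MARKING state: decides whether this cycle also compacts,
// patches the record-write stubs accordingly, activates the incremental write
// barrier, and greys the strong roots to seed the marking deque.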
void IncrementalMarking::StartMarking() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


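// Unlinks weak cells whose values are already marked from the list of
// encountered weak cells; only cells that may still need clearing are kept
// for the final pause.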
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list; they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead; no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it or if there is
  // - memory pressure (reduce_memory_footprint_),
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked; age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked; this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  // do not need processing during GC.
  MarkRoots();
  MarkObjectGroups();
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }
}


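// After a scavenge the marking deque may contain stale entries: objects in
// from-space are replaced by their forwarding addresses (or dropped if they
// died), and one-word fillers are skipped. The deque is compacted in place,
// preserving the order of the remaining entries.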
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one and two word fillers. Incremental markbit patterns
    // are correct only for objects that occupy at least two words.
    // Moreover, slots filtering for left-trimmed arrays works only when
    // the distance between the old array start and the new array start
    // is greater than two if both starts are marked.
    Map* map = obj->map();
    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
      continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


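// Drains the whole marking deque without a step budget and moves to COMPLETE,
// then blackens the polymorphic code cache and the contexts' normalized map
// caches that were deliberately kept grey during marking. Finalize() calls
// this right before Stop().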
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}


double IncrementalMarking::AdvanceIncrementalMarking(
    intptr_t step_size_in_bytes, double deadline_in_ms,
    IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  if (step_size_in_bytes == 0) {
    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
        static_cast<size_t>(
            heap()
                ->tracer()
                ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
  }

  double remaining_time_in_ms = 0.0;
  do {
    Step(step_size_in_bytes, step_actions.completion_action,
         step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


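// Heuristically raises marking_speed_ when marking appears to fall behind:
// periodically by step count, when old-space headroom is low, when the heap
// has grown substantially since marking started, or when allocation outpaces
// scanning.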
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}


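// Performs one bounded chunk of incremental work. The byte budget is derived
// from the bytes allocated and the write barriers invoked since the last
// step, scaled by marking_speed_. Depending on the state this either helps
// finish sweeping and then starts marking, or processes the marking deque,
// requesting finalization or completion once the deque runs dry.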
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8