// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(NONE) {}

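// Shared tail of the incremental write barrier: if a black (already scanned)
// object now points to a white (unmarked) object, the white object is turned
// grey and pushed onto the marking deque so that it will be scanned later,
// preserving the invariant that black objects never point to white ones. The
// return value tells the caller whether the slot still needs to be recorded
// for the compaction phase.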
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}


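// Grey means "marked but not yet scanned": flip the mark bit from white to
// grey and push the object onto the marking deque so that a later incremental
// step pops it and visits its fields.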
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


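// Marking visitor used while incremental marking is running. It reuses the
// static marking visitor, except that large fixed arrays in the large-object
// space are scanned in kProgressBarScanningChunk-sized pieces, with the
// progress stored on the page, so a single huge array does not have to be
// scanned within one marking step.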
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
  }
}

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


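// The POINTERS_{TO,FROM}_HERE_ARE_INTERESTING flags are tested by the write
// barrier fast path to decide whether a store must be reported. While marking
// is active both flags are set on old-space pages so that stores get routed
// through the incremental barrier above.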
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    Page* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (LargePage::IsValid(lop)) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Don't switch on for very small heaps.
  return CanBeActivated() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


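// Transition from STOPPED or SWEEPING into the MARKING state: decide whether
// to compact, patch the record-write stubs, set up the marking deque,
// activate the incremental write barrier on all spaces and mark the strong
// roots grey.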
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  OldSpace* old_space = heap()->old_space();
  old_space->EmptyAllocationInfo();
  old_space->free_list()->Reset();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!heap_->UsingEmbedderHeapTracer());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


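// Walk the singly linked list of WeakCells gathered during marking and unlink
// the cells whose values are already marked; only cells that may still need
// clearing are left for the atomic pause.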
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


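// Keep recently used maps alive artificially: a still-unmarked map whose
// constructor is alive is marked here and aged, so that it survives up to
// FLAG_retain_maps_for_n_gc collections. Maps registered before the last
// context disposal, or all maps when the heap should reduce memory, are not
// retained.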
void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells, they
  // do not need processing during GC.
  MarkRoots();
  if (!heap_->UsingEmbedderHeapTracer()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        static_cast<int>(delta), incremental_marking_finalization_rounds_,
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}


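// A scavenge can leave stale pointers on the marking deque: entries that were
// evacuated from new space are rewritten to their forwarding addresses, while
// entries that died (and one-word fillers) are dropped by compacting the
// deque in place.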
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}


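// Pop objects off the marking deque and visit them until either the deque is
// empty or roughly bytes_to_process bytes of object bodies have been scanned;
// the returned byte count feeds the step accounting in Step().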
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one and two word fillers. Incremental markbit patterns
    // are correct only for objects that occupy at least two words.
    // Moreover, slots filtering for left-trimmed arrays works only when
    // the distance between the old array start and the new array start
    // is greater than two if both starts are marked.
    Map* map = obj->map();
    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
      continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

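// Repeatedly perform marking steps of a size estimated by the idle-time
// handler until the deadline is nearly reached, marking completes, or the
// marking deque runs dry; returns the time remaining before the deadline.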
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
      heap()
          ->tracer()
          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
  double remaining_time_in_ms = 0.0;
  intptr_t bytes_processed = 0;

  do {
    bytes_processed =
        Step(step_size_in_bytes, step_actions.completion_action,
             step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (bytes_processed > 0 &&
           remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


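// Heuristically raise marking_speed_, the factor that converts allocated
// bytes into a per-step marking budget: periodically by step count, when
// old-space headroom is almost exhausted, when the old generation has grown
// substantially since marking started, or when promotion outpaces scanning.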
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
       !FLAG_concurrent_sweeping)) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    bytes_scanned_ = 0;
    StartMarking();
  }
}

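// One incremental marking step, driven by allocation and by the idle-time
// handler. The work budget scales with the bytes allocated and the write
// barriers invoked since the last step; once the deque drains, the step
// requests finalization or completion, possibly via the stack guard.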
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    // TODO(hpayer): Do not account for sweeping finalization while marking.
    if (state_ == SWEEPING) {
      FinalizeSweeping();
    }

    if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slow or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8