// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
  return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
                     IncrementalMarking::FORCE_MARKING,
                     IncrementalMarking::DO_NOT_FORCE_COMPLETION);
}

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      idle_marking_delay_counter_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(NONE) {}

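// Write-barrier slow path shared by the Record* helpers below: if a black
// object now points to a white object, grey the white object and push it on
// the marking deque so it gets rescanned. Returns true when the slot itself
// still has to be recorded for compaction (i.e. we are compacting and the
// host object is already black).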
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = Marking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

  if (is_black && Marking::IsWhite(value_bit)) {
    WhiteToGreyAndPush(value_heap_obj, value_bit);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}

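// Entry point called from generated code. In addition to forwarding to
// RecordWrite, it drains the page's write-barrier counter (in batches of
// kWriteBarrierCounterGranularity) into
// write_barriers_invoked_since_last_step_, which Step() uses to scale how
// much marking work to do.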
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}


void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
  Marking::WhiteToGrey(mark_bit);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case this
      // fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
      return true;
    }
    return false;
  }
};

void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
  }
}

class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}

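// Page-flag maintenance for the incremental write barrier. On old-space pages
// POINTERS_FROM_HERE_ARE_INTERESTING stays set either way; the
// POINTERS_TO_HERE_ARE_INTERESTING flag is only set while incremental marking
// is active.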
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Don't switch on for very small heaps.
  return CanBeActivated() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
         heap_->HeapIsFullEnoughToStartIncrementalMarking(
             heap_->old_generation_allocation_limit());
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        PrintIsolate(heap()->isolate(),
                     "Increasing marking speed to %d "
                     "due to high promotion rate\n",
                     static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}

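// Walks the heap's code-stub table and switches every RecordWrite stub to the
// given mode, so the generated write-barrier code matches the current
// incremental-marking state.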
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(const char* reason) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start (%s)\n",
           (reason == nullptr) ? "unknown reason" : reason);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);

  incremental_marking_job()->Start(heap_);
}


void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
      MarkCompactCollector::kMaxMarkingDequeSize);

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  OldSpace* old_space = heap()->old_space();
  old_space->EmptyAllocationInfo();
  old_space->free_list()->Reset();
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}

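// Prunes the list of weak cells encountered so far: cells whose values are
// already marked are unlinked (after recording the value slot for possible
// evacuation), while cells with unmarked values stay on the list so they can
// be cleared later.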
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::FromInt(0);
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::FromInt(0)) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (MarkCompactCollector::IsMarked(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::FromInt(0)) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if flag disables it or there is
  // - memory pressure (reduce_memory_footprint_),
  // - GC is requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        Marking::IsWhite(map_mark)) {
      if (ShouldRetainMap(map, age)) {
        MarkObject(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked, this map keeps only
        // transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}


void IncrementalMarking::FinalizeIncrementally() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  // do not need processing during GC.
  MarkRoots();
  MarkObjectGroups();
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  heap_->tracer()->AddMarkingTime(delta);
  heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}

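// After a scavenge the marking deque may contain stale entries: new-space
// objects have moved, and some entries may refer to dead objects or fillers.
// Rewrite surviving new-space entries to their forwarding addresses and drop
// entries for dead objects and fillers.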
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
          continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkObject(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
  MarkBit mark_bit = Marking::MarkBitFrom(obj);
  if (Marking::IsWhite(mark_bit)) {
    heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
  }
}

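// Drains the marking deque until it is empty or roughly bytes_to_process
// bytes have been visited; returns the number of bytes actually processed.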
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
  Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one and two word fillers. Incremental markbit patterns
    // are correct only for objects that occupy at least two words.
    // Moreover, slots filtering for left-trimmed arrays works only when
    // the distance between the old array start and the new array start
    // is greater than two if both starts are marked.
    Map* map = obj->map();
    if (map == one_pointer_filler_map || map == two_pointer_filler_map)
      continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache,
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Stopping.\n");
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    PrintF(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then
  // that shouldn't make us do a scavenge and keep being incremental, so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

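// Runs marking steps until the deadline is (nearly) reached, marking
// completes, or no further progress is made. Returns the time left until
// deadline_in_ms, which may be negative if the deadline was overshot.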
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
  DCHECK(!IsStopped());

  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
      heap()
          ->tracer()
          ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
  double remaining_time_in_ms = 0.0;
  intptr_t bytes_processed = 0;

  do {
    bytes_processed =
        Step(step_size_in_bytes, step_actions.completion_action,
             step_actions.force_marking, step_actions.force_completion);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (bytes_processed > 0 &&
           remaining_time_in_ms >=
               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
           !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}

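// Heuristically raises marking_speed_ when marking appears to be falling
// behind: too many steps taken, little old-generation headroom left, the heap
// growing quickly during marking, or promotion outpacing scanning.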
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
                   static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(),
                   "Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(),
                     "Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
                     marking_speed_);
      }
    }
  }
}

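// Performs a single incremental marking step. The amount of marking work is
// scaled by marking_speed_ and by how much has been allocated (or how often
// the write barrier fired) since the previous step; returns the number of
// bytes of marking performed.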
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  DCHECK(allocated_bytes >= 0);

  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
    double start = heap_->MonotonicallyIncreasingTimeInMs();

    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking();
      }
    }
    if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slow or if we are almost done
    // with marking.
    SpeedUp();

    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}  // namespace internal
}  // namespace v8