// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/objects-visiting-inl.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      idle_marking_delay_counter_(0),
      no_marking_scope_depth_(0),
      unscanned_bytes_of_large_object_(0),
      was_activated_(false) {}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
                                                  slot, value);
    }
  }
}


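// Entry point used by generated code. Once the chunk's write barrier counter
// drops below half of its granularity, the barriers consumed so far are added
// to write_barriers_invoked_since_last_step_ and the counter is reset before
// the write itself is recorded.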
47void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
48 Isolate* isolate) {
49 DCHECK(obj->IsHeapObject());
50 IncrementalMarking* marking = isolate->heap()->incremental_marking();
51
52 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
53 int counter = chunk->write_barrier_counter();
54 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
55 marking->write_barriers_invoked_since_last_step_ +=
56 MemoryChunk::kWriteBarrierCounterGranularity -
57 chunk->write_barrier_counter();
58 chunk->set_write_barrier_counter(
59 MemoryChunk::kWriteBarrierCounterGranularity);
60 }
61
62 marking->RecordWrite(obj, slot, *slot);
63}
64
65
66void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
67 HeapObject* value) {
68 if (IsMarking()) {
69 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
70 RecordWriteIntoCode(host, &rinfo, value);
71 }
72}
73
74
75void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
76 if (IsMarking()) {
77 Code* host = heap_->isolate()
78 ->inner_pointer_to_code_cache()
79 ->GcSafeFindCodeForInnerPointer(pc);
80 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
81 RecordWriteIntoCode(host, &rinfo, value);
82 }
83}
84
85
86void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
87 Object** slot,
88 Code* value) {
89 if (BaseRecordWrite(host, slot, value)) {
90 DCHECK(slot != NULL);
91 heap_->mark_compact_collector()->RecordCodeEntrySlot(
92 reinterpret_cast<Address>(slot), value);
93 }
94}
95
96
97void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
98 RelocInfo* rinfo,
99 Object* value) {
100 MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
101 if (Marking::IsWhite(value_bit)) {
102 MarkBit obj_bit = Marking::MarkBitFrom(obj);
103 if (Marking::IsBlack(obj_bit)) {
104 BlackToGreyAndUnshift(obj, obj_bit);
105 RestartIfNotMarking();
106 }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


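// Marks the object grey without pushing it onto the marking deque. If the
// object was black, its live bytes are reverted so that they are not counted
// twice when the object is marked black again.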
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


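// Sets the mark bit of the object if it is not already set and accounts for
// its size in the chunk's live bytes. Already marked objects are left
// unchanged. Used for data-only objects that never go onto the marking deque.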
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


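// Transitions the object to black (from white or grey) and accounts for its
// size in the chunk's live bytes. Objects that are already black are left
// unchanged.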
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit, int size) {
  DCHECK(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  DCHECK(Marking::IsBlack(mark_bit));
}


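// Static visitor used while incremental marking is running. It extends the
// generic marking visitor with incremental-specific handling for fixed arrays
// (chunked scanning driven by a progress bar) and for native contexts.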
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk of
      // the array and try to push it onto the marking deque again until it is
      // fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined()) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
                                             Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


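// Visitor used to mark the strong roots grey (or black, for data-only
// objects) when incremental marking starts.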
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {}

  void VisitPointer(Object** p) { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


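// Sets or clears the page flags that route the incremental and compaction
// write barriers for an old-space or large-object chunk. Large objects that
// span more than one page are rescanned on evacuation instead of relying on
// recorded slots.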
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::ShouldActivate() {
  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
         heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() &&
         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we don't
    // need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


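// Walks the code stub cache and patches every RecordWrite stub to the given
// mode.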
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(FLAG_incremental_marking_steps);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  ResetStepCounters();

  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
                   heap_->mark_compact_collector()->StartCompaction(
                       MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


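// Resets the marking bitmaps of all from-space pages when a scavenge is about
// to happen while incremental marking is active.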
void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


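// Rewrites the marking deque after a scavenge: new-space entries are replaced
// by their forwarding addresses (or dropped if the object did not survive),
// and one-word fillers are skipped.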
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        DCHECK(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      DCHECK(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#if ENABLE_SLOW_DCHECKS
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


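// Drains the marking deque until either it is empty or roughly
// bytes_to_process bytes worth of objects have been visited. Returns the
// number of bytes actually processed, excluding the unscanned tail of large
// objects handled via the progress bar.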
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  intptr_t bytes_processed = 0;
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    int delta = (size - unscanned_bytes_of_large_object_);
    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
    CHECK_LT(0, delta);
    bytes_processed += delta;
  }
  return bytes_processed;
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty()) {
    HeapObject* obj = marking_deque->Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = base::OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black because the mutator
    // was stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = base::OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->tracer()->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
  heap_->isolate()->stack_guard()->ClearGC();
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that shouldn't make us do a scavenge and keep being incremental; so we set
  // the should-hurry flag to indicate that there can't be much work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() { was_activated_ = false; }


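// Called on old-space allocation. Either starts incremental marking if it is
// worthwhile, or advances it by a step proportional to the allocated bytes.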
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivate()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


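// Heuristically increases marking_speed_ when marking is falling behind:
// after a fixed number of steps, when old-space headroom is low, when the old
// generation has grown considerably since marking started, or when allocation
// outpaces scanning.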
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice as fast as we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }
}


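// Performs one incremental marking step. The amount of work is derived from
// the bytes allocated and the write barriers invoked since the last step,
// scaled by marking_speed_. Returns the number of bytes of marking work
// actually performed, or 0 if the step was skipped or only advanced sweeping.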
intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                  CompletionAction action,
                                  ForceMarkingAction marking,
                                  ForceCompletionAction completion) {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return 0;
  }

  allocated_ += allocated_bytes;

  if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return 0;
  }

  // If an idle notification happened recently, we delay marking steps.
  if (marking == DO_NOT_FORCE_MARKING &&
      heap_->RecentIdleNotificationHappened()) {
    return 0;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;

  intptr_t bytes_processed = 0;
  {
    HistogramTimerScope incremental_marking_scope(
        heap_->isolate()->counters()->gc_incremental_marking());
    double start = base::OS::TimeCurrentMillis();

    // The marking speed is driven either by the allocation rate or by the
    // rate at which we are having to check the color of objects in the write
    // barrier. It is possible for a tight non-allocating loop to run a lot of
    // write barriers before we get here and check them (marking can only take
    // place on allocation), so to reduce the lumpiness we don't use the write
    // barriers invoked since last step directly to determine the amount of
    // work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    allocated_ = 0;
    write_barriers_invoked_since_last_step_ = 0;

    bytes_scanned_ += bytes_to_process;

    if (state_ == SWEEPING) {
      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
           !FLAG_concurrent_sweeping)) {
        heap_->mark_compact_collector()->EnsureSweepingCompleted();
      }
      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
        bytes_scanned_ = 0;
        StartMarking(PREVENT_COMPACTION);
      }
    } else if (state_ == MARKING) {
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          MarkingComplete(action);
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      }
    }

    steps_count_++;

    // Speed up marking if we are marking too slowly or if we are almost done
    // with marking.
    SpeedUp();

    double end = base::OS::TimeCurrentMillis();
    double duration = (end - start);
    // Note that we report zero bytes here when sweeping was in progress or
    // when we just started incremental marking. In these cases we did not
    // process the marking deque.
    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  }
  return bytes_processed;
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}
}
}  // namespace v8::internal