// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {

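// Incremental marking spreads the cost of a full mark-compact collection
// over many small steps interleaved with mutator execution. Objects move
// through the usual tri-color scheme: white (unvisited), grey (discovered
// and sitting on the marking deque) and black (fully scanned). The write
// barriers below keep this invariant intact while the mutator runs
// between steps.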

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      marker_(this, heap->mark_compact_collector()),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      allocation_marking_factor_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && is_compacting_ && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object* value,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());

  // Fast cases should already be covered by RecordWriteStub.
  ASSERT(value->IsHeapObject());
  ASSERT(!value->IsHeapNumber());
  ASSERT(!value->IsString() ||
         value->IsConsString() ||
         value->IsSlicedString());
  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));

  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);
  marking->RecordWrite(obj, NULL, value);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);
  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value) && is_compacting_) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


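// Static visitor used for marking steps. Besides marking reachable objects
// grey and pushing them on the marking deque, it records visited slots with
// the mark-compact collector so they can be updated if objects move during
// incremental compaction.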
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();

    table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);

    table_.Register(kVisitJSFunction, &VisitJSFunction);

    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    if (target->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
      MarkObject(heap, target);
    }
  }

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
        (target->ic_age() != heap->global_ic_age())) {
      IC::Clear(rinfo->pc());
      target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    }
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, Code::cast(target));
    MarkObject(heap, target);
  }

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    code->CodeIterateBody<IncrementalMarkingMarkingVisitor>(heap);
  }

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    VisitPointers(heap,
                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
                  HeapObject::RawField(object, JSWeakMap::kSize));
  }

  static void VisitSharedFunctionInfo(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
    if (shared->ic_age() != heap->global_ic_age()) {
      shared->ResetForNewContext(heap->global_ic_age());
    }
    FixedBodyVisitor<IncrementalMarkingMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }

  static inline void VisitJSFunction(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    // Iterate over all fields in the body but take care in dealing with
    // the code entry and skip weak fields.
    VisitPointers(heap,
                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
    VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    VisitPointers(heap,
                  HeapObject::RawField(object,
                      JSFunction::kCodeEntryOffset + kPointerSize),
                  HeapObject::RawField(object,
                      JSFunction::kNonWeakFieldsEndOffset));
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }
};


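// Visitor used to mark the strong roots grey when marking starts. Unlike
// the marking visitor above it records no slots; the roots are visited
// again when marking is finalized.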
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  IncrementalMarkingRootMarkingVisitor(Heap* heap,
                                       IncrementalMarking* incremental_marking)
      : heap_(heap),
        incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
        MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                              heap_object->Size());
      }
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  Heap* heap_;
  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


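// While marking is on, stores into a page have to be tracked, so both
// pointer flags are set. Large object pages additionally get flagged for
// rescanning on evacuation when compacting, because slots recorded for
// objects on such pages are difficult to filter out later.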
void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif

  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      !Serializer::enabled() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so we
    // don't need to do anything if incremental marking is not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


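// Patch every RecordWrite stub in the code-stubs dictionary to the given
// mode, so that generated code takes the write barrier path matching the
// current marking state.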
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}


void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


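// Starts incremental marking, or, if the old spaces are still being swept,
// enters the SWEEPING state so that Step() can help the sweeper along
// before real marking begins.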
void IncrementalMarking::Start() {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(state_ == STOPPED);

  ResetStepCounters();

  if (heap_->old_pointer_space()->IsSweepingComplete() &&
      heap_->old_data_space()->IsSweepingComplete()) {
    StartMarking(ALLOW_COMPACTION);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


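// Turn an object grey without pushing it on the marking deque, so that its
// contents are not retained by the marker. Used for caches that are marked
// black in a separate pass once marking finishes.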
static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

#ifdef DEBUG
  // Marking bits are cleared by the sweeper.
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark the cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


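// A scavenge moves objects in new space, so deque entries pointing there
// must be updated to the forwarding addresses, and entries for objects
// that did not survive are dropped.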
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one-word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}


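// Drain the marking deque eagerly. This runs while the mutator is stopped,
// so each object popped from the deque can be marked black as soon as its
// body has been visited.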
void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Hurry\n");
      start = OS::TimeCurrentMillis();
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty()) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) {
        continue;
      } else if (map == global_context_map) {
        // Global contexts have weak fields.
        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, obj);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        MarkBit map_mark_bit = Marking::MarkBitFrom(map);
        if (Marking::IsWhite(map_mark_bit)) {
          WhiteToGreyAndPush(map, map_mark_bit);
        }
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      ASSERT(!Marking::IsBlack(mark_bit));
      Marking::MarkBlack(mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
    }
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = OS::TimeCurrentMillis();
      PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
             static_cast<int>(end - start));
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

  Object* context = heap_->global_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental, so
  // we set the should-hurry flag to indicate that there can't be much work
  // left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


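// One incremental step: process an amount of marking work proportional to
// the bytes allocated since the last step, scaled by
// allocation_marking_factor_. The heuristics below grow the factor when
// the marker risks falling behind the mutator.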
void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold) return;

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    Map* filler_map = heap_->one_pointer_filler_map();
    Map* global_context_map = heap_->global_context_map();
    while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
      HeapObject* obj = marking_deque_.Pop();

      // Explicitly skip one word fillers. Incremental markbit patterns are
      // correct only for objects that occupy at least two words.
      Map* map = obj->map();
      if (map == filler_map) continue;

      int size = obj->SizeFromMap(map);
      bytes_to_process -= size;
      MarkBit map_mark_bit = Marking::MarkBitFrom(map);
      if (Marking::IsWhite(map_mark_bit)) {
        WhiteToGreyAndPush(map, map_mark_bit);
      }

      // TODO(gc) switch to static visitor instead of normal visitor.
      if (map == global_context_map) {
        // Global contexts have weak fields.
        Context* ctx = Context::cast(obj);

        // We will mark the cache black with a separate pass
        // when we finish marking.
        MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());

        IncrementalMarkingMarkingVisitor::VisitGlobalContext(map, ctx);
      } else if (map->instance_type() == MAP_TYPE) {
        Map* map = Map::cast(obj);
        heap_->ClearCacheOnMap(map);

        // When map collection is enabled we have to mark through map's
        // transitions and back pointers in a special way to make these links
        // weak. Only maps for subclasses of JSReceiver can have transitions.
        STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
        if (FLAG_collect_maps &&
            map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
          marker_.MarkMapContents(map);
        } else {
          IncrementalMarkingMarkingVisitor::VisitPointers(
              heap_,
              HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
              HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
        }
      } else {
        IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
      }

      MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
      SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
                  (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
      Marking::MarkBlack(obj_mark_bit);
      MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
    }
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  allocated_ = 0;

  steps_count_++;
  steps_count_since_last_gc_++;

  bool speed_up = false;

  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (allocation_marking_factor_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = allocation_marking_factor_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed at which we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
      allocation_marking_factor_ = static_cast<int>(
          Min(kMaxAllocationMarkingFactor,
              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
  bytes_scanned_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal