// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "incremental-marking.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "v8conversions.h"

namespace v8 {
namespace internal {


IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      marking_deque_memory_(NULL),
      marking_deque_memory_committed_(false),
      steps_count_(0),
      steps_took_(0),
      longest_step_(0.0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      steps_count_since_last_gc_(0),
      steps_took_since_last_gc_(0),
      should_hurry_(false),
      marking_speed_(0),
      allocated_(0),
      no_marking_scope_depth_(0) {
}


void IncrementalMarking::TearDown() {
  delete marking_deque_memory_;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
                                         Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned; we need to record the slot.
      heap_->mark_compact_collector()->RecordSlot(
          HeapObject::RawField(obj, 0), slot, value);
    }
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
                                             Object** slot,
                                             Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(!marking->is_compacting_);

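  // Once the chunk's write barrier counter has dropped below half of its
  // granularity, account for the barriers invoked since the counter was last
  // reset and restore it to the full granularity.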
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
                                                          Object** slot,
                                                          Isolate* isolate) {
  ASSERT(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  ASSERT(marking->is_compacting_);

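  // Same write barrier counter bookkeeping as in RecordWriteFromCode above.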
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
}


void IncrementalMarking::RecordCodeTargetPatch(Code* host,
                                               Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, slot, value)) {
    ASSERT(slot != NULL);
    heap_->mark_compact_collector()->
        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
  }
}


void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
                                                 RelocInfo* rinfo,
                                                 Object* value) {
  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
  if (Marking::IsWhite(value_bit)) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      BlackToGreyAndUnshift(obj, obj_bit);
      RestartIfNotMarking();
    }
    // Object is either grey or white. It will be scanned if it survives.
    return;
  }

  if (is_compacting_) {
    MarkBit obj_bit = Marking::MarkBitFrom(obj);
    if (Marking::IsBlack(obj_bit)) {
      // Object is not going to be rescanned. We need to record the slot.
      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
                                                       Code::cast(value));
    }
  }
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
    if (Marking::IsBlack(mark_bit)) {
      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
                                            -heap_obj->Size());
    }
    Marking::AnyToGrey(mark_bit);
  }
}


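// Marks a white object black without pushing it on the marking deque; objects
// that are already marked (grey or black) are left as they are.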
static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
                                       MarkBit mark_bit,
                                       int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (mark_bit.Get()) return;
  mark_bit.Set();
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}


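// Marks a white or grey object black; objects that are already black are left
// untouched, so the live byte count is only incremented once per object.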
static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
                                        MarkBit mark_bit,
                                        int size) {
  ASSERT(!Marking::IsImpossible(mark_bit));
  if (Marking::IsBlack(mark_bit)) return;
  Marking::MarkBlack(mark_bit);
  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
  ASSERT(Marking::IsBlack(mark_bit));
}


class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    // TODO(mstarzinger): Move setting of the flag to the allocation site of
    // the array. The visitor should just check the flag.
    if (FLAG_use_marking_progress_bar &&
        chunk->owner()->identity() == LO_SPACE) {
      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until
      // it is fully scanned. Fall back to scanning it through to the end in
      // case this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
                             chunk->progress_bar());
      int end_offset = Min(object_size,
                           start_offset + kProgressBarScanningChunk);
      bool scan_until_end = false;
      do {
        VisitPointersWithAnchor(heap,
                                HeapObject::RawField(object, 0),
                                HeapObject::RawField(object, start_offset),
                                HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
    VisitNativeContext(map, context);
  }

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    VisitPointers(heap,
                  HeapObject::RawField(object, JSWeakMap::kPropertiesOffset),
                  HeapObject::RawField(object, JSWeakMap::kSize));
  }

  static void BeforeVisitingSharedFunctionInfo(HeapObject* object) {}

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    Object* obj = *p;
    if (obj->NonFailureIsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(p, p, obj);
      MarkObject(heap, obj);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(start, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  INLINE(static void VisitPointersWithAnchor(Heap* heap,
                                             Object** anchor,
                                             Object** start,
                                             Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* obj = *p;
      if (obj->NonFailureIsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
        MarkObject(heap, obj);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
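    // Mark bits flagged as data-only belong to objects without pointers;
    // those can be marked black right away, everything else goes grey and
    // onto the marking deque for scanning.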
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else if (Marking::IsWhite(mark_bit)) {
      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
    }
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (Marking::IsWhite(mark_bit)) {
      mark_bit.Set();
      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
                                            heap_object->Size());
      return true;
    }
    return false;
  }
};


class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : incremental_marking_(incremental_marking) {
  }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    HeapObject* heap_object = HeapObject::cast(obj);
    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
    if (mark_bit.data_only()) {
      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
    } else {
      if (Marking::IsWhite(mark_bit)) {
        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
      }
    }
  }

  IncrementalMarking* incremental_marking_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);

    // It's difficult to filter out slots recorded for large objects.
    if (chunk->owner()->identity() == LO_SPACE &&
        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
        is_compacting) {
      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
  } else if (chunk->owner()->identity() == CELL_SPACE ||
             chunk->scan_on_scavenge()) {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  NewSpacePageIterator it(space);
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, false, false);
    lop = lop->next_page();
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
  while (it.has_next()) {
    NewSpacePage* p = it.next();
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
  ActivateIncrementalWriteBarrier(heap_->old_data_space());
  ActivateIncrementalWriteBarrier(heap_->cell_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  LargePage* lop = heap_->lo_space()->first_page();
  while (lop->is_valid()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
    lop = lop->next_page();
  }
}


bool IncrementalMarking::WorthActivating() {
#ifndef DEBUG
  static const intptr_t kActivationThreshold = 8 * MB;
#else
  // TODO(gc) consider setting this to some low level so that some
  // debug tests run with incremental marking and some without.
  static const intptr_t kActivationThreshold = 0;
#endif
  // Only start incremental marking in a safe state: 1) when expose GC is
  // deactivated, 2) when incremental marking is turned on, 3) when we are
  // currently not in a GC, and 4) when we are currently not serializing
  // or deserializing the heap.
  return !FLAG_expose_gc &&
      FLAG_incremental_marking &&
      FLAG_incremental_marking_steps &&
      heap_->gc_state() == Heap::NOT_IN_GC &&
      !Serializer::enabled() &&
      heap_->isolate()->IsInitialized() &&
      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  ASSERT(RecordWriteStub::GetMode(stub) ==
         RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) ==
          CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}


void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
  if (marking_deque_memory_ == NULL) {
    marking_deque_memory_ = new VirtualMemory(4 * MB);
  }
  if (!marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Commit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size(),
        false);  // Not executable.
    CHECK(success);
    marking_deque_memory_committed_ = true;
  }
}

void IncrementalMarking::UncommitMarkingDeque() {
  if (state_ == STOPPED && marking_deque_memory_committed_) {
    bool success = marking_deque_memory_->Uncommit(
        reinterpret_cast<Address>(marking_deque_memory_->address()),
        marking_deque_memory_->size());
    CHECK(success);
    marking_deque_memory_committed_ = false;
  }
}


void IncrementalMarking::Start(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start\n");
  }
  ASSERT(FLAG_incremental_marking);
  ASSERT(FLAG_incremental_marking_steps);
  ASSERT(state_ == STOPPED);
  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
  ASSERT(!Serializer::enabled());
  ASSERT(heap_->isolate()->IsInitialized());

  ResetStepCounters();

  if (heap_->IsSweepingComplete()) {
    StartMarking(flag);
  } else {
    if (FLAG_trace_incremental_marking) {
      PrintF("[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
}


void IncrementalMarking::StartMarking(CompactionFlag flag) {
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Start marking\n");
  }

  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
      heap_->mark_compact_collector()->StartCompaction(
          MarkCompactCollector::INCREMENTAL_COMPACTION);

  state_ = MARKING;

  RecordWriteStub::Mode mode = is_compacting_ ?
      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  EnsureMarkingDequeIsCommitted();

  // Initialize marking stack.
  Address addr = static_cast<Address>(marking_deque_memory_->address());
  size_t size = marking_deque_memory_->size();
  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
  marking_deque_.Initialize(addr, addr + size);

  ActivateIncrementalWriteBarrier();

  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  if (FLAG_cleanup_code_caches_at_gc) {
    // We will mark cache black with a separate pass
    // when we finish marking.
    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
  }

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Running\n");
  }
}


void IncrementalMarking::PrepareForScavenge() {
  if (!IsMarking()) return;
  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
                          heap_->new_space()->FromSpaceEnd());
  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  int current = marking_deque_.bottom();
  int mask = marking_deque_.mask();
  int limit = marking_deque_.top();
  HeapObject** array = marking_deque_.array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

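  // After a scavenge, deque entries may point into from space. Replace
  // evacuated objects with their forwarding addresses and drop new-space
  // objects that did not survive.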
  while (current != limit) {
    HeapObject* obj = array[current];
    ASSERT(obj->IsHeapObject());
    current = ((current + 1) & mask);
    if (heap_->InNewSpace(obj)) {
      MapWord map_word = obj->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
        MarkBit mark_bit = Marking::MarkBitFrom(obj);
        ASSERT(Marking::IsGrey(mark_bit) ||
               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform an in-place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
      MarkBit mark_bit = Marking::MarkBitFrom(obj);
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      ASSERT(Marking::IsGrey(mark_bit) ||
             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              Marking::IsBlack(mark_bit)));
#endif
    }
  }
  marking_deque_.set_top(new_top);

  steps_took_since_last_gc_ = 0;
  steps_count_since_last_gc_ = 0;
  longest_step_ = 0.0;
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
  if (Marking::IsWhite(map_mark_bit)) {
    WhiteToGreyAndPush(map, map_mark_bit);
  }

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

  MarkBit mark_bit = Marking::MarkBitFrom(obj);
#ifdef DEBUG
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlackOrKeepBlack(obj, mark_bit, size);
}


void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    int size = obj->SizeFromMap(map);
    bytes_to_process -= size;
    VisitObject(map, obj, size);
  }
}


void IncrementalMarking::ProcessMarkingDeque() {
  Map* filler_map = heap_->one_pointer_filler_map();
  while (!marking_deque_.IsEmpty()) {
    HeapObject* obj = marking_deque_.Pop();

    // Explicitly skip one word fillers. Incremental markbit patterns are
    // correct only for objects that occupy at least two words.
    Map* map = obj->map();
    if (map == filler_map) continue;

    VisitObject(map, obj, obj->SizeFromMap(map));
  }
}


void IncrementalMarking::Hurry() {
  if (state() == MARKING) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      start = OS::TimeCurrentMillis();
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Hurry\n");
      }
    }
    // TODO(gc) hurry can mark objects it encounters black as the mutator
    // is stopped.
    ProcessMarkingDeque();
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
      double end = OS::TimeCurrentMillis();
      double delta = end - start;
      heap_->AddMarkingTime(delta);
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
               static_cast<int>(delta));
      }
    }
  }

  if (FLAG_cleanup_code_caches_at_gc) {
    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
                                          PolymorphicCodeCache::kSize);
  }

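  // This is the separate pass announced in VisitNativeContextIncremental:
  // the normalized map caches were kept grey during marking and are promoted
  // to black here.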
  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined()) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined()) {
      MarkBit mark_bit = Marking::MarkBitFrom(cache);
      if (Marking::IsGrey(mark_bit)) {
        Marking::GreyToBlack(mark_bit);
        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
      }
    }
    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void IncrementalMarking::Abort() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Aborting.\n");
  }
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();

    if (is_compacting_) {
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
  state_ = STOPPED;
  is_compacting_ = false;
}


void IncrementalMarking::Finalize() {
  Hurry();
  state_ = STOPPED;
  is_compacting_ = false;
  heap_->new_space()->LowerInlineAllocationLimit(0);
  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  PatchIncrementalMarkingRecordWriteStubs(heap_,
                                          RecordWriteStub::STORE_BUFFER_ONLY);
  DeactivateIncrementalWriteBarrier();
  ASSERT(marking_deque_.IsEmpty());
  heap_->isolate()->stack_guard()->Continue(GC_REQUEST);
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, that shouldn't make us do a scavenge and keep being incremental,
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    PrintF("[IncrementalMarking] Complete (normal).\n");
  }
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
    // TODO(hpayer): Let's play safe for now, but compaction should be
    // in principle possible.
    Start(PREVENT_COMPACTION);
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}


void IncrementalMarking::Step(intptr_t allocated_bytes,
                              CompletionAction action) {
  if (heap_->gc_state() != Heap::NOT_IN_GC ||
      !FLAG_incremental_marking ||
      !FLAG_incremental_marking_steps ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;

  if (allocated_ < kAllocatedThreshold &&
      write_barriers_invoked_since_last_step_ <
          kWriteBarriersInvokedThreshold) {
    return;
  }

  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;

  // The marking speed is driven either by the allocation rate or by the rate
  // at which we are having to check the color of objects in the write barrier.
  // It is possible for a tight non-allocating loop to run a lot of write
  // barriers before we get here and check them (marking can only take place on
  // allocation), so to reduce the lumpiness we don't use the write barriers
  // invoked since last step directly to determine the amount of work to do.
  intptr_t bytes_to_process =
      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;

  bytes_scanned_ += bytes_to_process;

  double start = 0;

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    start = OS::TimeCurrentMillis();
  }

  if (state_ == SWEEPING) {
    if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) {
      bytes_scanned_ = 0;
      StartMarking(PREVENT_COMPACTION);
    }
  } else if (state_ == MARKING) {
    ProcessMarkingDeque(bytes_to_process);
    if (marking_deque_.IsEmpty()) MarkingComplete(action);
  }

  steps_count_++;
  steps_count_since_last_gc_++;

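  // Each heuristic below can decide to speed up marking: periodically after
  // a fixed number of steps, when old-generation space is running low, when
  // the heap has grown substantially during marking, or when the marker is
  // not keeping up with the allocation rate.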
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking after %d steps\n",
               static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
          old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because of heap size increase\n");
    }
  }

  int64_t promoted_during_marking = heap_->PromotedTotalSize()
      - old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_gc) {
      PrintPID("Speed up marking because marker was not keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_gc) {
        PrintPID("Postponing speeding up marking until marking starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed,
              static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_gc) {
        PrintPID("Marking speed increased to %d\n", marking_speed_);
      }
    }
  }

  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
      FLAG_print_cumulative_gc_stat) {
    double end = OS::TimeCurrentMillis();
    double delta = (end - start);
    longest_step_ = Max(longest_step_, delta);
    steps_took_ += delta;
    steps_took_since_last_gc_ += delta;
    heap_->AddMarkingTime(delta);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  steps_took_ = 0;
  longest_step_ = 0.0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  steps_count_since_last_gc_ = 0;
  steps_took_since_last_gc_ = 0;
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}

} }  // namespace v8::internal