1// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "execution.h"
31#include "global-handles.h"
32#include "ic-inl.h"
33#include "mark-compact.h"
34#include "stub-cache.h"
35
36namespace v8 {
37namespace internal {
38
39// -------------------------------------------------------------------------
40// MarkCompactCollector
41
42bool MarkCompactCollector::force_compaction_ = false;
43bool MarkCompactCollector::compacting_collection_ = false;
44bool MarkCompactCollector::compact_on_next_gc_ = false;
45
46int MarkCompactCollector::previous_marked_count_ = 0;
47GCTracer* MarkCompactCollector::tracer_ = NULL;
48
49
50#ifdef DEBUG
51MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;
52
53// Counters used for debugging the marking phase of mark-compact or mark-sweep
54// collection.
55int MarkCompactCollector::live_bytes_ = 0;
56int MarkCompactCollector::live_young_objects_ = 0;
57int MarkCompactCollector::live_old_data_objects_ = 0;
58int MarkCompactCollector::live_old_pointer_objects_ = 0;
59int MarkCompactCollector::live_code_objects_ = 0;
60int MarkCompactCollector::live_map_objects_ = 0;
61int MarkCompactCollector::live_cell_objects_ = 0;
62int MarkCompactCollector::live_lo_objects_ = 0;
63#endif
64
65void MarkCompactCollector::CollectGarbage() {
66 // Make sure that Prepare() has been called. The individual steps below will
67 // update the state as they proceed.
68 ASSERT(state_ == PREPARE_GC);
69
70 // Prepare has selected whether to compact the old generation or not.
71 // Tell the tracer.
72 if (IsCompacting()) tracer_->set_is_compacting();
73
74 MarkLiveObjects();
75
76 if (FLAG_collect_maps) ClearNonLiveTransitions();
77
78 SweepLargeObjectSpace();
79
80 if (IsCompacting()) {
81 EncodeForwardingAddresses();
82
83 UpdatePointers();
84
85 RelocateObjects();
86
87 RebuildRSets();
88
89 } else {
90 SweepSpaces();
91 }
92
93 Finish();
94
95 // Save the count of marked objects remaining after the collection and
96 // null out the GC tracer.
97 previous_marked_count_ = tracer_->marked_count();
98 ASSERT(previous_marked_count_ == 0);
99 tracer_ = NULL;
100}
101
102
103void MarkCompactCollector::Prepare(GCTracer* tracer) {
104 // Rather than passing the tracer around we stash it in a static member
105 // variable.
106 tracer_ = tracer;
107
108#ifdef DEBUG
109 ASSERT(state_ == IDLE);
110 state_ = PREPARE_GC;
111#endif
112 ASSERT(!FLAG_always_compact || !FLAG_never_compact);
113
114 compacting_collection_ =
115 FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
116 compact_on_next_gc_ = false;
117
118 if (FLAG_never_compact) compacting_collection_ = false;
119 if (!Heap::map_space()->MapPointersEncodable())
120 compacting_collection_ = false;
121 if (FLAG_collect_maps) CreateBackPointers();
122
123#ifdef DEBUG
124 if (compacting_collection_) {
125 // We will write bookkeeping information to the remembered set area
126 // starting now.
127 Page::set_rset_state(Page::NOT_IN_USE);
128 }
129#endif
130
131 PagedSpaces spaces;
132 while (PagedSpace* space = spaces.next()) {
133 space->PrepareForMarkCompact(compacting_collection_);
134 }
135
136#ifdef DEBUG
137 live_bytes_ = 0;
138 live_young_objects_ = 0;
139 live_old_pointer_objects_ = 0;
140 live_old_data_objects_ = 0;
141 live_code_objects_ = 0;
142 live_map_objects_ = 0;
143 live_cell_objects_ = 0;
144 live_lo_objects_ = 0;
145#endif
146}
147
148
149void MarkCompactCollector::Finish() {
150#ifdef DEBUG
151 ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
152 state_ = IDLE;
153#endif
154 // The stub cache is not traversed during GC; clear the cache to
155 // force lazy re-initialization of it. This must be done after the
156 // GC, because it relies on the new address of certain old space
157 // objects (empty string, illegal builtin).
158 StubCache::Clear();
159
160 ExternalStringTable::CleanUp();
161
162 // If we've just compacted old space there's no reason to check the
163 // fragmentation limit. Just return.
164 if (HasCompacted()) return;
165
166 // We compact the old generation on the next GC if it has gotten too
167 // fragmented (ie, we could recover an expected amount of space by
168 // reclaiming the waste and free list blocks).
169 static const int kFragmentationLimit = 15; // Percent.
170 static const int kFragmentationAllowed = 1 * MB; // Absolute.
171 int old_gen_recoverable = 0;
172 int old_gen_used = 0;
173
174 OldSpaces spaces;
175 while (OldSpace* space = spaces.next()) {
176 old_gen_recoverable += space->Waste() + space->AvailableFree();
177 old_gen_used += space->Size();
178 }
179
180 int old_gen_fragmentation =
181 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
182 if (old_gen_fragmentation > kFragmentationLimit &&
183 old_gen_recoverable > kFragmentationAllowed) {
184 compact_on_next_gc_ = true;
185 }
186}
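
// Worked example (editor's note): with 64 MB of old-generation pages of which
// 12 MB is waste plus free-list space, old_gen_fragmentation is
// 12 * 100 / 64 = 18 (18.75 truncated).  Since 18 > kFragmentationLimit (15)
// and 12 MB > kFragmentationAllowed (1 MB), compact_on_next_gc_ is set and the
// next mark-compact collection will compact the old generation.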
187
188
189// -------------------------------------------------------------------------
190// Phase 1: tracing and marking live objects.
191// before: all objects are in normal state.
192// after: a live object's map pointer is marked as '00'.
193
194// Marking all live objects in the heap as part of mark-sweep or mark-compact
195// collection. Before marking, all objects are in their normal state. After
196// marking, live objects' map pointers are marked indicating that the object
197// has been found reachable.
198//
199// The marking algorithm is a (mostly) depth-first (because of possible stack
200// overflow) traversal of the graph of objects reachable from the roots. It
201// uses an explicit stack of pointers rather than recursion. The young
202// generation's inactive ('from') space is used as a marking stack. The
203// objects in the marking stack are the ones that have been reached and marked
204// but their children have not yet been visited.
205//
206// The marking stack can overflow during traversal. In that case, we set an
207// overflow flag. When the overflow flag is set, we continue marking objects
208// reachable from the objects on the marking stack, but no longer push them on
209// the marking stack. Instead, we mark them as both marked and overflowed.
210// When the stack is in the overflowed state, objects marked as overflowed
211// have been reached and marked but their children have not been visited yet.
212// After emptying the marking stack, we clear the overflow flag and traverse
213// the heap looking for objects marked as overflowed, push them on the stack,
214// and continue with marking. This process repeats until all reachable
215// objects have been marked.
216
217static MarkingStack marking_stack;
218
219
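// Editor's sketch (not part of V8): the overflow-tolerant marking loop
// described above, reduced to a self-contained toy.  Every name below
// (marking_sketch, SketchNode, SketchMarker, kCapacity) is invented for
// illustration only; the real collector uses the marking stack declared
// above and the mark/overflow bits in each object's map word instead.
// Usage: call Mark() on each root, then MarkToFixpoint() over all nodes.
namespace marking_sketch {

struct SketchNode {
  bool marked;
  bool overflowed;
  SketchNode* child[2];
};

class SketchMarker {
 public:
  SketchMarker() : top_(0), overflowed_(false) {}

  // Mark a node and push it; if the stack is full, flag the node (and the
  // stack) as overflowed instead of pushing.
  void Mark(SketchNode* node) {
    if (node == NULL || node->marked) return;
    node->marked = true;
    Push(node);
  }

  // Mark everything reachable from the nodes already on the stack, then
  // repeatedly rescan for overflowed nodes, push them, and drain again until
  // none remain (the analogue of ProcessMarkingStack + RefillMarkingStack).
  void MarkToFixpoint(SketchNode* nodes, int count) {
    Drain();
    while (overflowed_) {
      overflowed_ = false;
      for (int i = 0; i < count; i++) {
        if (nodes[i].overflowed) {
          nodes[i].overflowed = false;
          Push(&nodes[i]);  // Already marked; only its children still need visiting.
        }
      }
      Drain();
    }
  }

 private:
  void Push(SketchNode* node) {
    if (top_ < kCapacity) {
      stack_[top_++] = node;
    } else {
      node->overflowed = true;
      overflowed_ = true;
    }
  }

  // Pop nodes and mark their children (the analogue of EmptyMarkingStack).
  void Drain() {
    while (top_ > 0) {
      SketchNode* node = stack_[--top_];
      Mark(node->child[0]);
      Mark(node->child[1]);
    }
  }

  enum { kCapacity = 4 };  // Deliberately tiny so the overflow path is exercised.
  SketchNode* stack_[kCapacity];
  int top_;
  bool overflowed_;
};

}  // namespace marking_sketch

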
220static inline HeapObject* ShortCircuitConsString(Object** p) {
221 // Optimization: If the heap object pointed to by p is a non-symbol
222 // cons string whose right substring is Heap::empty_string, update
223 // it in place to its left substring. Return the updated value.
224 //
225 // Here we assume that if we change *p, we replace it with a heap object
226 // (ie, the left substring of a cons string is always a heap object).
227 //
228 // The check performed is:
229 // object->IsConsString() && !object->IsSymbol() &&
230 // (ConsString::cast(object)->second() == Heap::empty_string())
231 // except the maps for the object and its possible substrings might be
232 // marked.
233 HeapObject* object = HeapObject::cast(*p);
234 MapWord map_word = object->map_word();
235 map_word.ClearMark();
236 InstanceType type = map_word.ToMap()->instance_type();
237 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
238
239 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
240 if (second != Heap::raw_unchecked_empty_string()) {
241 return object;
242 }
243
244 // Since we don't have the object's start, it is impossible to update the
245 // remembered set. Therefore, we only replace the string with its left
246 // substring when the remembered set does not change.
247 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
248 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
249
250 *p = first;
251 return HeapObject::cast(first);
252}
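
// Worked example (editor's note): if *p refers to a cons string whose halves
// are ("foo", Heap::empty_string()), the slot is rewritten to point straight
// at "foo", so marking does not keep the otherwise-dead cons cell alive
// through this reference.  The rewrite is skipped when the cons string is in
// old space but its left substring is in new space, since the remembered set
// cannot be updated here.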
253
254
255// Helper class for marking pointers in HeapObjects.
256class MarkingVisitor : public ObjectVisitor {
257 public:
258 void VisitPointer(Object** p) {
259 MarkObjectByPointer(p);
260 }
261
262 void VisitPointers(Object** start, Object** end) {
263 // Mark all objects pointed to in [start, end).
264 const int kMinRangeForMarkingRecursion = 64;
265 if (end - start >= kMinRangeForMarkingRecursion) {
266 if (VisitUnmarkedObjects(start, end)) return;
267 // We are close to a stack overflow, so just mark the objects.
268 }
269 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
270 }
271
272 void VisitCodeTarget(RelocInfo* rinfo) {
273 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
274 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
275 if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
276 IC::Clear(rinfo->pc());
277 // Please note targets for cleared inline caches do not have to be
278 // marked since they are contained in Heap::non_monomorphic_cache().
279 } else {
280 MarkCompactCollector::MarkObject(code);
281 }
282 }
283
284 void VisitDebugTarget(RelocInfo* rinfo) {
285 ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
286 rinfo->IsPatchedReturnSequence());
287 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
288 MarkCompactCollector::MarkObject(code);
289 }
290
291 private:
292 // Mark object pointed to by p.
293 void MarkObjectByPointer(Object** p) {
294 if (!(*p)->IsHeapObject()) return;
295 HeapObject* object = ShortCircuitConsString(p);
296 MarkCompactCollector::MarkObject(object);
297 }
298
299 // Tells whether the mark sweep collection will perform compaction.
300 bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
301
302 // Visit an unmarked object.
303 void VisitUnmarkedObject(HeapObject* obj) {
304#ifdef DEBUG
305 ASSERT(Heap::Contains(obj));
306 ASSERT(!obj->IsMarked());
307#endif
308 Map* map = obj->map();
309 MarkCompactCollector::SetMark(obj);
310 // Mark the map pointer and the body.
311 MarkCompactCollector::MarkObject(map);
312 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
313 }
314
315 // Visit all unmarked objects pointed to by [start, end).
316 // Returns false if the operation fails (lack of stack space).
317 inline bool VisitUnmarkedObjects(Object** start, Object** end) {
318 // Return false if we are close to the stack limit.
319 StackLimitCheck check;
320 if (check.HasOverflowed()) return false;
321
322 // Visit the unmarked objects.
323 for (Object** p = start; p < end; p++) {
324 if (!(*p)->IsHeapObject()) continue;
325 HeapObject* obj = HeapObject::cast(*p);
326 if (obj->IsMarked()) continue;
327 VisitUnmarkedObject(obj);
328 }
329 return true;
330 }
331};
332
333
334// Visitor class for marking heap roots.
335class RootMarkingVisitor : public ObjectVisitor {
336 public:
337 void VisitPointer(Object** p) {
338 MarkObjectByPointer(p);
339 }
340
341 void VisitPointers(Object** start, Object** end) {
342 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
343 }
344
345 MarkingVisitor* stack_visitor() { return &stack_visitor_; }
346
347 private:
348 MarkingVisitor stack_visitor_;
349
350 void MarkObjectByPointer(Object** p) {
351 if (!(*p)->IsHeapObject()) return;
352
353 // Replace flat cons strings in place.
354 HeapObject* object = ShortCircuitConsString(p);
355 if (object->IsMarked()) return;
356
357 Map* map = object->map();
358 // Mark the object.
359 MarkCompactCollector::SetMark(object);
360 // Mark the map pointer and body, and push them on the marking stack.
361 MarkCompactCollector::MarkObject(map);
362 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
363 &stack_visitor_);
364
365 // Mark all the objects reachable from the map and body. May leave
366 // overflowed objects in the heap.
367 MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
368 }
369};
370
371
372// Helper class for pruning the symbol table.
373class SymbolTableCleaner : public ObjectVisitor {
374 public:
375 SymbolTableCleaner() : pointers_removed_(0) { }
376
377 virtual void VisitPointers(Object** start, Object** end) {
378 // Visit all HeapObject pointers in [start, end).
379 for (Object** p = start; p < end; p++) {
380 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
381 // Check if the symbol being pruned is an external symbol. We need to
382 // delete the associated external data as this symbol is going away.
383
384 // Since no objects have yet been moved we can safely access the map of
385 // the object.
386 if ((*p)->IsExternalString()) {
387 Heap::FinalizeExternalString(String::cast(*p));
388 }
389 // Set the entry to null_value (as deleted).
390 *p = Heap::raw_unchecked_null_value();
391 pointers_removed_++;
392 }
393 }
394 }
395
396 int PointersRemoved() {
397 return pointers_removed_;
398 }
399 private:
400 int pointers_removed_;
401};
402
403
404void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
405 ASSERT(!object->IsMarked());
406 ASSERT(Heap::Contains(object));
407 if (object->IsMap()) {
408 Map* map = Map::cast(object);
409 if (FLAG_cleanup_caches_in_maps_at_gc) {
410 map->ClearCodeCache();
411 }
412 SetMark(map);
413 if (FLAG_collect_maps &&
414 map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
415 map->instance_type() <= JS_FUNCTION_TYPE) {
416 MarkMapContents(map);
417 } else {
418 marking_stack.Push(map);
419 }
420 } else {
421 SetMark(object);
422 marking_stack.Push(object);
423 }
424}
425
426
427void MarkCompactCollector::MarkMapContents(Map* map) {
428 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
429 *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
430
431 // Mark the Object* fields of the Map.
432 // Since the descriptor array has been marked already, it is fine
433 // that one of these fields contains a pointer to it.
434 MarkingVisitor visitor; // Has no state or contents.
435 visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
436 HeapObject::RawField(map, Map::kSize));
437}
438
439
440void MarkCompactCollector::MarkDescriptorArray(
441 DescriptorArray* descriptors) {
442 if (descriptors->IsMarked()) return;
443 // Empty descriptor array is marked as a root before any maps are marked.
444 ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
445 SetMark(descriptors);
446
447 FixedArray* contents = reinterpret_cast<FixedArray*>(
448 descriptors->get(DescriptorArray::kContentArrayIndex));
449 ASSERT(contents->IsHeapObject());
450 ASSERT(!contents->IsMarked());
451 ASSERT(contents->IsFixedArray());
452 ASSERT(contents->length() >= 2);
453 SetMark(contents);
454 // Contents contains (value, details) pairs. If the details say
455 // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
456 // or NULL_DESCRIPTOR, we don't mark the value as live. Only for
457 // type MAP_TRANSITION is the value an Object* (a Map*).
458 for (int i = 0; i < contents->length(); i += 2) {
459 // If the pair (value, details) at index i, i+1 is not
460 // a transition or null descriptor, mark the value.
461 PropertyDetails details(Smi::cast(contents->get(i + 1)));
462 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
463 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
464 if (object->IsHeapObject() && !object->IsMarked()) {
465 SetMark(object);
466 marking_stack.Push(object);
467 }
468 }
469 }
470 // The DescriptorArray descriptors contains a pointer to its contents array,
471 // but the contents array is already marked.
472 marking_stack.Push(descriptors);
473}
474
475
476void MarkCompactCollector::CreateBackPointers() {
477 HeapObjectIterator iterator(Heap::map_space());
478 while (iterator.has_next()) {
479 Object* next_object = iterator.next();
480 if (next_object->IsMap()) { // Could also be ByteArray on free list.
481 Map* map = Map::cast(next_object);
482 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
483 map->instance_type() <= JS_FUNCTION_TYPE) {
484 map->CreateBackPointers();
485 } else {
486 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
487 }
488 }
489 }
490}
491
492
493static int OverflowObjectSize(HeapObject* obj) {
494 // Recover the normal map pointer, it might be marked as live and
495 // overflowed.
496 MapWord map_word = obj->map_word();
497 map_word.ClearMark();
498 map_word.ClearOverflow();
499 return obj->SizeFromMap(map_word.ToMap());
500}
501
502
503// Fill the marking stack with overflowed objects returned by the given
504// iterator. Stop when the marking stack is filled or the end of the space
505// is reached, whichever comes first.
506template<class T>
507static void ScanOverflowedObjects(T* it) {
508 // The caller should ensure that the marking stack is initially not full,
509 // so that we don't waste effort pointlessly scanning for objects.
510 ASSERT(!marking_stack.is_full());
511
512 while (it->has_next()) {
513 HeapObject* object = it->next();
514 if (object->IsOverflowed()) {
515 object->ClearOverflow();
516 ASSERT(object->IsMarked());
517 ASSERT(Heap::Contains(object));
518 marking_stack.Push(object);
519 if (marking_stack.is_full()) return;
520 }
521 }
522}
523
524
525bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
526 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
527}
528
529
530void MarkCompactCollector::MarkSymbolTable() {
531 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
532 // Mark the symbol table itself.
533 SetMark(symbol_table);
534 // Explicitly mark the prefix.
535 MarkingVisitor marker;
536 symbol_table->IteratePrefix(&marker);
537 ProcessMarkingStack(&marker);
538}
539
540
541void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
542 // Mark the heap roots including global variables, stack variables,
543 // etc., and all objects reachable from them.
544 Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
545
546 // Handle the symbol table specially.
547 MarkSymbolTable();
548
549 // There may be overflowed objects in the heap. Visit them now.
550 while (marking_stack.overflowed()) {
551 RefillMarkingStack();
552 EmptyMarkingStack(visitor->stack_visitor());
553 }
554}
555
556
557void MarkCompactCollector::MarkObjectGroups() {
558 List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
559
560 for (int i = 0; i < object_groups->length(); i++) {
561 ObjectGroup* entry = object_groups->at(i);
562 if (entry == NULL) continue;
563
564 List<Object**>& objects = entry->objects_;
565 bool group_marked = false;
566 for (int j = 0; j < objects.length(); j++) {
567 Object* object = *objects[j];
568 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
569 group_marked = true;
570 break;
571 }
572 }
573
574 if (!group_marked) continue;
575
576 // An object in the group is marked, so mark as gray all white heap
577 // objects in the group.
578 for (int j = 0; j < objects.length(); ++j) {
579 if ((*objects[j])->IsHeapObject()) {
580 MarkObject(HeapObject::cast(*objects[j]));
581 }
582 }
583 // Once the entire group has been colored gray, set the object group
584 // to NULL so it won't be processed again.
585 delete object_groups->at(i);
586 object_groups->at(i) = NULL;
587 }
588}
589
590
591// Mark all objects reachable from the objects on the marking stack.
592// Before: the marking stack contains zero or more heap object pointers.
593// After: the marking stack is empty, and all objects reachable from the
594// marking stack have been marked, or are overflowed in the heap.
595void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
596 while (!marking_stack.is_empty()) {
597 HeapObject* object = marking_stack.Pop();
598 ASSERT(object->IsHeapObject());
599 ASSERT(Heap::Contains(object));
600 ASSERT(object->IsMarked());
601 ASSERT(!object->IsOverflowed());
602
603 // Because the object is marked, we have to recover the original map
604 // pointer and use it to mark the object's body.
605 MapWord map_word = object->map_word();
606 map_word.ClearMark();
607 Map* map = map_word.ToMap();
608 MarkObject(map);
609 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
610 visitor);
611 }
612}
613
614
615// Sweep the heap for overflowed objects, clear their overflow bits, and
616// push them on the marking stack. Stop early if the marking stack fills
617// before sweeping completes. If sweeping completes, there are no remaining
618 // overflowed objects in the heap so the overflow flag on the marking stack
619// is cleared.
620void MarkCompactCollector::RefillMarkingStack() {
621 ASSERT(marking_stack.overflowed());
622
623 SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
624 ScanOverflowedObjects(&new_it);
625 if (marking_stack.is_full()) return;
626
627 HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
628 &OverflowObjectSize);
629 ScanOverflowedObjects(&old_pointer_it);
630 if (marking_stack.is_full()) return;
631
632 HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
633 ScanOverflowedObjects(&old_data_it);
634 if (marking_stack.is_full()) return;
635
636 HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
637 ScanOverflowedObjects(&code_it);
638 if (marking_stack.is_full()) return;
639
640 HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
641 ScanOverflowedObjects(&map_it);
642 if (marking_stack.is_full()) return;
643
644 HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
645 ScanOverflowedObjects(&cell_it);
646 if (marking_stack.is_full()) return;
647
648 LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
649 ScanOverflowedObjects(&lo_it);
650 if (marking_stack.is_full()) return;
651
652 marking_stack.clear_overflowed();
653}
654
655
656// Mark all objects reachable (transitively) from objects on the marking
657// stack. Before: the marking stack contains zero or more heap object
658// pointers. After: the marking stack is empty and there are no overflowed
659// objects in the heap.
660void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
661 EmptyMarkingStack(visitor);
662 while (marking_stack.overflowed()) {
663 RefillMarkingStack();
664 EmptyMarkingStack(visitor);
665 }
666}
667
668
669void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
670 bool work_to_do = true;
671 ASSERT(marking_stack.is_empty());
672 while (work_to_do) {
673 MarkObjectGroups();
674 work_to_do = !marking_stack.is_empty();
675 ProcessMarkingStack(visitor);
676 }
677}
678
679
680void MarkCompactCollector::MarkLiveObjects() {
681#ifdef DEBUG
682 ASSERT(state_ == PREPARE_GC);
683 state_ = MARK_LIVE_OBJECTS;
684#endif
685 // The to space contains live objects, the from space is used as a marking
686 // stack.
687 marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
688 Heap::new_space()->FromSpaceHigh());
689
690 ASSERT(!marking_stack.overflowed());
691
692 RootMarkingVisitor root_visitor;
693 MarkRoots(&root_visitor);
694
695 // The objects reachable from the roots are marked, yet unreachable
696 // objects are unmarked. Mark objects reachable from object groups
697 // containing at least one marked object, and continue until no new
698 // objects are reachable from the object groups.
699 ProcessObjectGroups(root_visitor.stack_visitor());
700
701 // The objects reachable from the roots or object groups are marked,
702 // yet unreachable objects are unmarked. Mark objects reachable
703 // only from weak global handles.
704 //
705 // First we identify nonlive weak handles and mark them as pending
706 // destruction.
707 GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
708 // Then we mark the objects and process the transitive closure.
709 GlobalHandles::IterateWeakRoots(&root_visitor);
710 while (marking_stack.overflowed()) {
711 RefillMarkingStack();
712 EmptyMarkingStack(root_visitor.stack_visitor());
713 }
714
715 // Repeat the object groups to mark unmarked groups reachable from the
716 // weak roots.
717 ProcessObjectGroups(root_visitor.stack_visitor());
718
719 // Prune the symbol table removing all symbols only pointed to by the
720 // symbol table. Cannot use symbol_table() here because the symbol
721 // table is marked.
722 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
723 SymbolTableCleaner v;
724 symbol_table->IterateElements(&v);
725 symbol_table->ElementsRemoved(v.PointersRemoved());
726 ExternalStringTable::Iterate(&v);
727 ExternalStringTable::CleanUp();
728
729 // Remove object groups after marking phase.
730 GlobalHandles::RemoveObjectGroups();
731}
732
733
734static int CountMarkedCallback(HeapObject* obj) {
735 MapWord map_word = obj->map_word();
736 map_word.ClearMark();
737 return obj->SizeFromMap(map_word.ToMap());
738}
739
740
741#ifdef DEBUG
742void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
743 live_bytes_ += obj->Size();
744 if (Heap::new_space()->Contains(obj)) {
745 live_young_objects_++;
746 } else if (Heap::map_space()->Contains(obj)) {
747 ASSERT(obj->IsMap());
748 live_map_objects_++;
749 } else if (Heap::cell_space()->Contains(obj)) {
750 ASSERT(obj->IsJSGlobalPropertyCell());
751 live_cell_objects_++;
752 } else if (Heap::old_pointer_space()->Contains(obj)) {
753 live_old_pointer_objects_++;
754 } else if (Heap::old_data_space()->Contains(obj)) {
755 live_old_data_objects_++;
756 } else if (Heap::code_space()->Contains(obj)) {
757 live_code_objects_++;
758 } else if (Heap::lo_space()->Contains(obj)) {
759 live_lo_objects_++;
760 } else {
761 UNREACHABLE();
762 }
763}
764#endif // DEBUG
765
766
767void MarkCompactCollector::SweepLargeObjectSpace() {
768#ifdef DEBUG
769 ASSERT(state_ == MARK_LIVE_OBJECTS);
770 state_ =
771 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
772#endif
773 // Deallocate unmarked objects and clear marked bits for marked objects.
774 Heap::lo_space()->FreeUnmarkedObjects();
775}
776
777// Safe to use during marking phase only.
778bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
779 MapWord metamap = object->map_word();
780 metamap.ClearMark();
781 return metamap.ToMap()->instance_type() == MAP_TYPE;
782}
783
784void MarkCompactCollector::ClearNonLiveTransitions() {
785 HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
786 // Iterate over the map space, setting map transitions that go from
787 // a marked map to an unmarked map to null transitions. At the same time,
788 // set all the prototype fields of maps back to their original value,
789 // dropping the back pointers temporarily stored in the prototype field.
790 // Setting the prototype field requires following the linked list of
791 // back pointers, reversing them all at once. This allows us to find
792 // those maps with map transitions that need to be nulled, and only
793 // scan the descriptor arrays of those maps, not all maps.
794 // All of these actions are carried out only on maps of JSObjects
795 // and related subtypes.
796 while (map_iterator.has_next()) {
797 Map* map = reinterpret_cast<Map*>(map_iterator.next());
798 if (!map->IsMarked() && map->IsByteArray()) continue;
799
800 ASSERT(SafeIsMap(map));
801 // Only JSObject and subtypes have map transitions and back pointers.
802 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
803 if (map->instance_type() > JS_FUNCTION_TYPE) continue;
804 // Follow the chain of back pointers to find the prototype.
805 Map* current = map;
806 while (SafeIsMap(current)) {
807 current = reinterpret_cast<Map*>(current->prototype());
808 ASSERT(current->IsHeapObject());
809 }
810 Object* real_prototype = current;
811
812 // Follow back pointers, setting them to prototype,
813 // clearing map transitions when necessary.
814 current = map;
815 bool on_dead_path = !current->IsMarked();
816 Object* next;
817 while (SafeIsMap(current)) {
818 next = current->prototype();
819 // There should never be a dead map above a live map.
820 ASSERT(on_dead_path || current->IsMarked());
821
822 // A live map above a dead map indicates a dead transition.
823 // This test will always be false on the first iteration.
824 if (on_dead_path && current->IsMarked()) {
825 on_dead_path = false;
826 current->ClearNonLiveTransitions(real_prototype);
827 }
828 *HeapObject::RawField(current, Map::kPrototypeOffset) =
829 real_prototype;
830 current = reinterpret_cast<Map*>(next);
831 }
832 }
833}
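
// Worked example (editor's note): suppose map C transitioned from map B and
// B from map A, so after CreateBackPointers C's prototype field holds B, B's
// holds A, and A's still holds the real prototype P.  If C is dead but A and
// B are live, the walk starting at C first follows C -> B -> A to find P,
// then walks the chain again: at B it finds the first live map above the dead
// C, calls B->ClearNonLiveTransitions(P) to null B's transition to C, and
// restores the prototype fields of C, B and A to P along the way.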
834
835// -------------------------------------------------------------------------
836// Phase 2: Encode forwarding addresses.
837// When compacting, forwarding addresses for objects in old space and map
838// space are encoded in their map pointer word (along with an encoding of
839// their map pointers).
840//
841// The exact encoding is described in the comments for class MapWord in
842// objects.h.
843//
844// An address range [start, end) can have both live and non-live objects.
845// Maximal non-live regions are marked so they can be skipped on subsequent
846// sweeps of the heap. A distinguished map-pointer encoding is used to mark
847// free regions of one-word size (in which case the next word is the start
848// of a live object). A second distinguished map-pointer encoding is used
849// to mark free regions larger than one word, and the size of the free
850// region (including the first word) is written to the second word of the
851// region.
852//
853// Any valid map page offset must lie in the object area of the page, so map
854// page offsets less than Page::kObjectStartOffset are invalid. We use a
855// pair of distinguished invalid map encodings (for single word and multiple
856// words) to indicate free regions in the page found during computation of
857// forwarding addresses and skipped over in subsequent sweeps.
858static const uint32_t kSingleFreeEncoding = 0;
859static const uint32_t kMultiFreeEncoding = 1;
860
861
862// Encode a free region, defined by the given start address and size, in the
863// first word or two of the region.
864void EncodeFreeRegion(Address free_start, int free_size) {
865 ASSERT(free_size >= kIntSize);
866 if (free_size == kIntSize) {
867 Memory::uint32_at(free_start) = kSingleFreeEncoding;
868 } else {
869 ASSERT(free_size >= 2 * kIntSize);
870 Memory::uint32_at(free_start) = kMultiFreeEncoding;
871 Memory::int_at(free_start + kIntSize) = free_size;
872 }
873
874#ifdef DEBUG
875 // Zap the body of the free region.
876 if (FLAG_enable_slow_asserts) {
877 for (int offset = 2 * kIntSize;
878 offset < free_size;
879 offset += kPointerSize) {
880 Memory::Address_at(free_start + offset) = kZapValue;
881 }
882 }
883#endif
884}
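
// Worked example (editor's note, assuming 32-bit words): a one-word free
// region is encoded as the single word [kSingleFreeEncoding]; a 20-byte free
// region is encoded as [kMultiFreeEncoding][20], with the remaining words
// zapped only in debug builds.  Neither value can appear as the encoded map
// word of a live object (see the invalid-offset argument above), so later
// passes can recognize and skip the free regions.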
885
886
887 // Try to promote all objects in new space. Pointer-free objects such as
888 // heap numbers and sequential strings go to the old data space, all others
889 // to the old pointer space; objects that cannot be promoted stay in new space.
890inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
891 Object* forwarded;
892 if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
893 forwarded = Failure::Exception();
894 } else {
895 OldSpace* target_space = Heap::TargetSpace(object);
896 ASSERT(target_space == Heap::old_pointer_space() ||
897 target_space == Heap::old_data_space());
898 forwarded = target_space->MCAllocateRaw(object_size);
899 }
900 if (forwarded->IsFailure()) {
901 forwarded = Heap::new_space()->MCAllocateRaw(object_size);
902 }
903 return forwarded;
904}
905
906
907// Allocation functions for the paged spaces call the space's MCAllocateRaw.
908inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
909 int object_size) {
910 return Heap::old_pointer_space()->MCAllocateRaw(object_size);
911}
912
913
914inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
915 return Heap::old_data_space()->MCAllocateRaw(object_size);
916}
917
918
919inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
920 return Heap::code_space()->MCAllocateRaw(object_size);
921}
922
923
924inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
925 return Heap::map_space()->MCAllocateRaw(object_size);
926}
927
928
929inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
930 return Heap::cell_space()->MCAllocateRaw(object_size);
931}
932
933
934// The forwarding address is encoded at the same offset as the current
935// to-space object, but in from space.
936inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
937 int object_size,
938 Object* new_object,
939 int* ignored) {
940 int offset =
941 Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
942 Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
943 HeapObject::cast(new_object)->address();
944}
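
// Worked example (editor's note): a live object at to-space offset 0x40 that
// will move to address A gets A written to FromSpaceLow() + 0x40.  The update
// phase recomputes that from-space slot from the object's old address and
// reads the forwarding address back out of it (see UpdatingVisitor below).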
945
946
947// The forwarding address is encoded in the map pointer of the object as an
948// offset (in terms of live bytes) from the address of the first live object
949// in the page.
950inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
951 int object_size,
952 Object* new_object,
953 int* offset) {
954 // Record the forwarding address of the first live object if necessary.
955 if (*offset == 0) {
956 Page::FromAddress(old_object->address())->mc_first_forwarded =
957 HeapObject::cast(new_object)->address();
958 }
959
960 MapWord encoding =
961 MapWord::EncodeAddress(old_object->map()->address(), *offset);
962 old_object->set_map_word(encoding);
963 *offset += object_size;
964 ASSERT(*offset <= Page::kObjectAreaSize);
965}
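
// Worked example (editor's note): if an object is preceded on its page by
// 48 bytes of live data, its map word is replaced by
// MapWord::EncodeAddress(map_address, 48).  Together with the page's
// mc_first_forwarded field this is enough to reconstruct both the original
// map and the object's destination address during the update and relocation
// phases.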
966
967
968// Most non-live objects are ignored.
969inline void IgnoreNonLiveObject(HeapObject* object) {}
970
971
972// A code deletion event is logged for non-live code objects.
973inline void LogNonLiveCodeObject(HeapObject* object) {
974 if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
975}
976
977
978// Function template that, given a range of addresses (eg, a semispace or a
979// paged space page), iterates through the objects in the range to clear
980// mark bits and compute and encode forwarding addresses. As a side effect,
981// maximal free chunks are marked so that they can be skipped on subsequent
982// sweeps.
983//
984// The template parameters are an allocation function, a forwarding address
985// encoding function, and a function to process non-live objects.
986template<MarkCompactCollector::AllocationFunction Alloc,
987 MarkCompactCollector::EncodingFunction Encode,
988 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
989inline void EncodeForwardingAddressesInRange(Address start,
990 Address end,
991 int* offset) {
992 // The start address of the current free region while sweeping the space.
993 // This address is set when a transition from live to non-live objects is
994 // encountered. A value (an encoding of the 'next free region' pointer)
995 // is written to memory at this address when a transition from non-live to
996 // live objects is encountered.
997 Address free_start = NULL;
998
999 // A flag giving the state of the previously swept object. Initially true
1000 // to ensure that free_start is initialized to a proper address before
1001 // trying to write to it.
1002 bool is_prev_alive = true;
1003
1004 int object_size; // Will be set on each iteration of the loop.
1005 for (Address current = start; current < end; current += object_size) {
1006 HeapObject* object = HeapObject::FromAddress(current);
1007 if (object->IsMarked()) {
1008 object->ClearMark();
1009 MarkCompactCollector::tracer()->decrement_marked_count();
1010 object_size = object->Size();
1011
1012 Object* forwarded = Alloc(object, object_size);
1013 // Allocation cannot fail, because we are compacting the space.
1014 ASSERT(!forwarded->IsFailure());
1015 Encode(object, object_size, forwarded, offset);
1016
1017#ifdef DEBUG
1018 if (FLAG_gc_verbose) {
1019 PrintF("forward %p -> %p.\n", object->address(),
1020 HeapObject::cast(forwarded)->address());
1021 }
1022#endif
1023 if (!is_prev_alive) { // Transition from non-live to live.
1024 EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
1025 is_prev_alive = true;
1026 }
1027 } else { // Non-live object.
1028 object_size = object->Size();
1029 ProcessNonLive(object);
1030 if (is_prev_alive) { // Transition from live to non-live.
1031 free_start = current;
1032 is_prev_alive = false;
1033 }
1034 }
1035 }
1036
1037 // If we ended on a free region, mark it.
1038 if (!is_prev_alive) {
1039 EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
1040 }
1041}
1042
1043
1044// Functions to encode the forwarding pointers in each compactable space.
1045void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
1046 int ignored;
1047 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
1048 EncodeForwardingAddressInNewSpace,
1049 IgnoreNonLiveObject>(
1050 Heap::new_space()->bottom(),
1051 Heap::new_space()->top(),
1052 &ignored);
1053}
1054
1055
1056template<MarkCompactCollector::AllocationFunction Alloc,
1057 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1058void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
1059 PagedSpace* space) {
1060 PageIterator it(space, PageIterator::PAGES_IN_USE);
1061 while (it.has_next()) {
1062 Page* p = it.next();
1063 // The offset of each live object in the page from the first live object
1064 // in the page.
1065 int offset = 0;
1066 EncodeForwardingAddressesInRange<Alloc,
1067 EncodeForwardingAddressInPagedSpace,
1068 ProcessNonLive>(
1069 p->ObjectAreaStart(),
1070 p->AllocationTop(),
1071 &offset);
1072 }
1073}
1074
1075
1076static void SweepSpace(NewSpace* space) {
1077 HeapObject* object;
1078 for (Address current = space->bottom();
1079 current < space->top();
1080 current += object->Size()) {
1081 object = HeapObject::FromAddress(current);
1082 if (object->IsMarked()) {
1083 object->ClearMark();
1084 MarkCompactCollector::tracer()->decrement_marked_count();
1085 } else {
1086 // We give non-live objects a map that will correctly give their size,
1087 // since their existing map might not be live after the collection.
1088 int size = object->Size();
1089 if (size >= ByteArray::kHeaderSize) {
1090 object->set_map(Heap::raw_unchecked_byte_array_map());
1091 ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
1092 } else {
1093 ASSERT(size == kPointerSize);
1094 object->set_map(Heap::raw_unchecked_one_pointer_filler_map());
1095 }
1096 ASSERT(object->Size() == size);
1097 }
1098 // The object is now unmarked for the call to Size() at the top of the
1099 // loop.
1100 }
1101}
1102
1103
1104static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
1105 PageIterator it(space, PageIterator::PAGES_IN_USE);
1106 while (it.has_next()) {
1107 Page* p = it.next();
1108
1109 bool is_previous_alive = true;
1110 Address free_start = NULL;
1111 HeapObject* object;
1112
1113 for (Address current = p->ObjectAreaStart();
1114 current < p->AllocationTop();
1115 current += object->Size()) {
1116 object = HeapObject::FromAddress(current);
1117 if (object->IsMarked()) {
1118 object->ClearMark();
1119 MarkCompactCollector::tracer()->decrement_marked_count();
1120 if (!is_previous_alive) { // Transition from free to live.
1121 dealloc(free_start, static_cast<int>(current - free_start));
1122 is_previous_alive = true;
1123 }
1124 } else {
1125 if (object->IsCode()) {
1126 // Notify the logger that compiled code has been collected.
1127 LOG(CodeDeleteEvent(Code::cast(object)->address()));
1128 }
1129 if (is_previous_alive) { // Transition from live to free.
1130 free_start = current;
1131 is_previous_alive = false;
1132 }
1133 }
1134 // The object is now unmarked for the call to Size() at the top of the
1135 // loop.
1136 }
1137
1138 // If the last region was not live we need to deallocate from
1139 // free_start to the allocation top in the page.
1140 if (!is_previous_alive) {
1141 int free_size = static_cast<int>(p->AllocationTop() - free_start);
1142 if (free_size > 0) {
1143 dealloc(free_start, free_size);
1144 }
1145 }
1146 }
1147}
1148
1149
1150void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
1151 int size_in_bytes) {
1152 Heap::ClearRSetRange(start, size_in_bytes);
1153 Heap::old_pointer_space()->Free(start, size_in_bytes);
1154}
1155
1156
1157void MarkCompactCollector::DeallocateOldDataBlock(Address start,
1158 int size_in_bytes) {
1159 Heap::old_data_space()->Free(start, size_in_bytes);
1160}
1161
1162
1163void MarkCompactCollector::DeallocateCodeBlock(Address start,
1164 int size_in_bytes) {
1165 Heap::code_space()->Free(start, size_in_bytes);
1166}
1167
1168
1169void MarkCompactCollector::DeallocateMapBlock(Address start,
1170 int size_in_bytes) {
1171 // Objects in map space are assumed to have size Map::kSize and a
1172 // valid map in their first word. Thus, we break the free block up into
1173 // chunks and free them separately.
1174 ASSERT(size_in_bytes % Map::kSize == 0);
1175 Heap::ClearRSetRange(start, size_in_bytes);
1176 Address end = start + size_in_bytes;
1177 for (Address a = start; a < end; a += Map::kSize) {
1178 Heap::map_space()->Free(a);
1179 }
1180}
1181
1182
1183void MarkCompactCollector::DeallocateCellBlock(Address start,
1184 int size_in_bytes) {
1185 // Free-list elements in cell space are assumed to have a fixed size.
1186 // We break the free block into chunks and add them to the free list
1187 // individually.
1188 int size = Heap::cell_space()->object_size_in_bytes();
1189 ASSERT(size_in_bytes % size == 0);
1190 Heap::ClearRSetRange(start, size_in_bytes);
1191 Address end = start + size_in_bytes;
1192 for (Address a = start; a < end; a += size) {
1193 Heap::cell_space()->Free(a);
1194 }
1195}
1196
1197
1198void MarkCompactCollector::EncodeForwardingAddresses() {
1199 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1200 // Objects in the active semispace of the young generation may be
1201 // relocated to the inactive semispace (if not promoted). Set the
1202 // relocation info to the beginning of the inactive semispace.
1203 Heap::new_space()->MCResetRelocationInfo();
1204
1205 // Compute the forwarding pointers in each space.
1206 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
1207 IgnoreNonLiveObject>(
1208 Heap::old_pointer_space());
1209
1210 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
1211 IgnoreNonLiveObject>(
1212 Heap::old_data_space());
1213
1214 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
1215 LogNonLiveCodeObject>(
1216 Heap::code_space());
1217
1218 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
1219 IgnoreNonLiveObject>(
1220 Heap::cell_space());
1221
1222
1223 // Compute new space next to last after the old and code spaces have been
1224 // compacted. Objects in new space can be promoted to old or code space.
1225 EncodeForwardingAddressesInNewSpace();
1226
1227 // Compute map space last because computing forwarding addresses
1228 // overwrites non-live objects. Objects in the other spaces rely on
1229 // non-live map pointers to get the sizes of non-live objects.
1230 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
1231 IgnoreNonLiveObject>(
1232 Heap::map_space());
1233
1234 // Write relocation info to the top page, so we can use it later. This is
1235 // done after promoting objects from the new space so we get the correct
1236 // allocation top.
1237 Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
1238 Heap::old_data_space()->MCWriteRelocationInfoToPage();
1239 Heap::code_space()->MCWriteRelocationInfoToPage();
1240 Heap::map_space()->MCWriteRelocationInfoToPage();
1241 Heap::cell_space()->MCWriteRelocationInfoToPage();
1242}
1243
1244
1245class MapIterator : public HeapObjectIterator {
1246 public:
1247 MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
1248
1249 explicit MapIterator(Address start)
1250 : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
1251
1252 private:
1253 static int SizeCallback(HeapObject* unused) {
1254 USE(unused);
1255 return Map::kSize;
1256 }
1257};
1258
1259
1260class MapCompact {
1261 public:
1262 explicit MapCompact(int live_maps)
1263 : live_maps_(live_maps),
1264 to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
1265 map_to_evacuate_it_(to_evacuate_start_),
1266 first_map_to_evacuate_(
1267 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
1268 }
1269
1270 void CompactMaps() {
1271 // As we know the number of maps to evacuate beforehand,
1272 // we stop when there are no more vacant maps.
1273 for (Map* next_vacant_map = NextVacantMap();
1274 next_vacant_map;
1275 next_vacant_map = NextVacantMap()) {
1276 EvacuateMap(next_vacant_map, NextMapToEvacuate());
1277 }
1278
1279#ifdef DEBUG
1280 CheckNoMapsToEvacuate();
1281#endif
1282 }
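
  // Worked example (editor's note): if the map space holds the slots
  // [M0, free, M2, free, M4, M5] with four live maps, TopAfterCompaction(4)
  // points just past the fourth slot, so M4 and M5 are the maps to evacuate.
  // They are copied into the two vacant slots, and each leaves behind a map
  // word with the overflow bit set that encodes its new location for the
  // pointer-updating passes below.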
1283
1284 void UpdateMapPointersInRoots() {
1285 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
1286 GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
1287 }
1288
1289 void FinishMapSpace() {
1290 // Iterate over the retained part of the map space and finish the move.
1291 MapIterator it;
1292 HeapObject* o = it.next();
1293 for (; o != first_map_to_evacuate_; o = it.next()) {
1294 Map* map = reinterpret_cast<Map*>(o);
1295 ASSERT(!map->IsMarked());
1296 ASSERT(!map->IsOverflowed());
1297 ASSERT(map->IsMap());
1298 Heap::UpdateRSet(map);
1299 }
1300 }
1301
1302 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
1303 ASSERT(space != Heap::map_space());
1304
1305 PageIterator it(space, PageIterator::PAGES_IN_USE);
1306 while (it.has_next()) {
1307 Page* p = it.next();
1308 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
1309 }
1310 }
1311
1312 void UpdateMapPointersInNewSpace() {
1313 NewSpace* space = Heap::new_space();
1314 UpdateMapPointersInRange(space->bottom(), space->top());
1315 }
1316
1317 void UpdateMapPointersInLargeObjectSpace() {
1318 LargeObjectIterator it(Heap::lo_space());
1319 while (true) {
1320 if (!it.has_next()) break;
1321 UpdateMapPointersInObject(it.next());
1322 }
1323 }
1324
1325 void Finish() {
1326 Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
1327 }
1328
1329 private:
1330 int live_maps_;
1331 Address to_evacuate_start_;
1332 MapIterator vacant_map_it_;
1333 MapIterator map_to_evacuate_it_;
1334 Map* first_map_to_evacuate_;
1335
1336 // Helper class for updating map pointers in HeapObjects.
1337 class MapUpdatingVisitor: public ObjectVisitor {
1338 public:
1339 void VisitPointer(Object** p) {
1340 UpdateMapPointer(p);
1341 }
1342
1343 void VisitPointers(Object** start, Object** end) {
1344 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
1345 }
1346
1347 private:
1348 void UpdateMapPointer(Object** p) {
1349 if (!(*p)->IsHeapObject()) return;
1350 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
1351
1352 // Moved maps are tagged with an overflowed map word. They are the only
1353 // objects whose map word is overflowed, since marking is already complete.
1354 MapWord map_word = old_map->map_word();
1355 if (!map_word.IsOverflowed()) return;
1356
1357 *p = GetForwardedMap(map_word);
1358 }
1359 };
1360
1361 static MapUpdatingVisitor map_updating_visitor_;
1362
1363 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
1364 while (true) {
1365 ASSERT(it->has_next());
1366 HeapObject* next = it->next();
1367 if (next == last)
1368 return NULL;
1369 ASSERT(!next->IsOverflowed());
1370 ASSERT(!next->IsMarked());
1371 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
1372 if (next->IsMap() == live)
1373 return reinterpret_cast<Map*>(next);
1374 }
1375 }
1376
1377 Map* NextVacantMap() {
1378 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
1379 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
1380 return map;
1381 }
1382
1383 Map* NextMapToEvacuate() {
1384 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
1385 ASSERT(map != NULL);
1386 ASSERT(map->IsMap());
1387 return map;
1388 }
1389
1390 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
1391 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
1392 ASSERT(map_to_evacuate->IsMap());
1393
1394 memcpy(
1395 reinterpret_cast<void*>(vacant_map->address()),
1396 reinterpret_cast<void*>(map_to_evacuate->address()),
1397 Map::kSize);
1398 ASSERT(vacant_map->IsMap()); // Due to memcpy above.
1399
1400 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
1401 forwarding_map_word.SetOverflow();
1402 map_to_evacuate->set_map_word(forwarding_map_word);
1403
1404 ASSERT(map_to_evacuate->map_word().IsOverflowed());
1405 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
1406 }
1407
1408 static Map* GetForwardedMap(MapWord map_word) {
1409 ASSERT(map_word.IsOverflowed());
1410 map_word.ClearOverflow();
1411 Map* new_map = map_word.ToMap();
1412 ASSERT_MAP_ALIGNED(new_map->address());
1413 return new_map;
1414 }
1415
1416 static int UpdateMapPointersInObject(HeapObject* obj) {
1417 ASSERT(!obj->IsMarked());
1418 Map* map = obj->map();
1419 ASSERT(Heap::map_space()->Contains(map));
1420 MapWord map_word = map->map_word();
1421 ASSERT(!map_word.IsMarked());
1422 if (map_word.IsOverflowed()) {
1423 Map* new_map = GetForwardedMap(map_word);
1424 ASSERT(Heap::map_space()->Contains(new_map));
1425 obj->set_map(new_map);
1426
1427#ifdef DEBUG
1428 if (FLAG_gc_verbose) {
1429 PrintF("update %p : %p -> %p\n", obj->address(),
1430 map, new_map);
1431 }
1432#endif
1433 }
1434
1435 int size = obj->SizeFromMap(map);
1436 obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
1437 return size;
1438 }
1439
1440 static void UpdateMapPointersInRange(Address start, Address end) {
1441 HeapObject* object;
1442 int size;
1443 for (Address current = start; current < end; current += size) {
1444 object = HeapObject::FromAddress(current);
1445 size = UpdateMapPointersInObject(object);
1446 ASSERT(size > 0);
1447 }
1448 }
1449
1450#ifdef DEBUG
1451 void CheckNoMapsToEvacuate() {
1452 if (!FLAG_enable_slow_asserts)
1453 return;
1454
1455 while (map_to_evacuate_it_.has_next())
1456 ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
1457 }
1458#endif
1459};
1460
1461MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
1462
1463
1464void MarkCompactCollector::SweepSpaces() {
1465 ASSERT(state_ == SWEEP_SPACES);
1466 ASSERT(!IsCompacting());
1467 // Noncompacting collections simply sweep the spaces to clear the mark
1468 // bits and free the nonlive blocks (for old and map spaces). We sweep
1469 // the map space last because freeing non-live maps overwrites them and
1470 // the other spaces rely on possibly non-live maps to get the sizes for
1471 // non-live objects.
1472 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
1473 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
1474 SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
1475 SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
1476 SweepSpace(Heap::new_space());
1477 SweepSpace(Heap::map_space(), &DeallocateMapBlock);
1478 int live_maps = Heap::map_space()->Size() / Map::kSize;
1479 ASSERT(live_map_objects_ == live_maps);
1480
1481 if (Heap::map_space()->NeedsCompaction(live_maps)) {
1482 MapCompact map_compact(live_maps);
1483
1484 map_compact.CompactMaps();
1485 map_compact.UpdateMapPointersInRoots();
1486
1487 map_compact.FinishMapSpace();
1488 PagedSpaces spaces;
1489 while (PagedSpace* space = spaces.next()) {
1490 if (space == Heap::map_space()) continue;
1491 map_compact.UpdateMapPointersInPagedSpace(space);
1492 }
1493 map_compact.UpdateMapPointersInNewSpace();
1494 map_compact.UpdateMapPointersInLargeObjectSpace();
1495
1496 map_compact.Finish();
1497 }
1498}
1499
1500
1501 // Iterate the live objects in a range of addresses (eg, a page or a
1502 // semispace). Free regions in the range were encoded in their first word
1503 // or two during the forwarding phase (kSingleFreeEncoding and
1504 // kMultiFreeEncoding) and are skipped here. The callback function is used
1505 // to get the size of each live object.
1506int MarkCompactCollector::IterateLiveObjectsInRange(
1507 Address start,
1508 Address end,
1509 HeapObjectCallback size_func) {
1510 int live_objects = 0;
1511 Address current = start;
1512 while (current < end) {
1513 uint32_t encoded_map = Memory::uint32_at(current);
1514 if (encoded_map == kSingleFreeEncoding) {
1515 current += kPointerSize;
1516 } else if (encoded_map == kMultiFreeEncoding) {
1517 current += Memory::int_at(current + kIntSize);
1518 } else {
1519 live_objects++;
1520 current += size_func(HeapObject::FromAddress(current));
1521 }
1522 }
1523 return live_objects;
1524}
1525
1526
1527int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
1528 HeapObjectCallback size_f) {
1529 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
1530 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
1531}
1532
1533
1534int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
1535 HeapObjectCallback size_f) {
1536 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
1537 int total = 0;
1538 PageIterator it(space, PageIterator::PAGES_IN_USE);
1539 while (it.has_next()) {
1540 Page* p = it.next();
1541 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
1542 p->AllocationTop(),
1543 size_f);
1544 }
1545 return total;
1546}
1547
1548
1549// -------------------------------------------------------------------------
1550// Phase 3: Update pointers
1551
1552// Helper class for updating pointers in HeapObjects.
1553class UpdatingVisitor: public ObjectVisitor {
1554 public:
1555 void VisitPointer(Object** p) {
1556 UpdatePointer(p);
1557 }
1558
1559 void VisitPointers(Object** start, Object** end) {
1560    // Update all HeapObject pointers in [start, end).
1561 for (Object** p = start; p < end; p++) UpdatePointer(p);
1562 }
1563
1564 void VisitCodeTarget(RelocInfo* rinfo) {
1565 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1566 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1567 VisitPointer(&target);
1568 rinfo->set_target_address(
1569 reinterpret_cast<Code*>(target)->instruction_start());
1570 }
1571
1572  void VisitDebugTarget(RelocInfo* rinfo) {
1573 ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
1574 rinfo->IsPatchedReturnSequence());
1575 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1576 VisitPointer(&target);
1577 rinfo->set_call_address(
1578 reinterpret_cast<Code*>(target)->instruction_start());
1579 }
1580
1581 private:
1582 void UpdatePointer(Object** p) {
1583 if (!(*p)->IsHeapObject()) return;
1584
1585 HeapObject* obj = HeapObject::cast(*p);
1586 Address old_addr = obj->address();
1587 Address new_addr;
1588 ASSERT(!Heap::InFromSpace(obj));
1589
1590 if (Heap::new_space()->Contains(obj)) {
1591 Address forwarding_pointer_addr =
1592 Heap::new_space()->FromSpaceLow() +
1593 Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
1594 new_addr = Memory::Address_at(forwarding_pointer_addr);
1595
1596#ifdef DEBUG
1597 ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
1598 Heap::old_data_space()->Contains(new_addr) ||
1599 Heap::new_space()->FromSpaceContains(new_addr) ||
1600 Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
1601
1602 if (Heap::new_space()->FromSpaceContains(new_addr)) {
1603 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
1604 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
1605 }
1606#endif
1607
1608 } else if (Heap::lo_space()->Contains(obj)) {
1609 // Don't move objects in the large object space.
1610 return;
1611
1612 } else {
1613#ifdef DEBUG
1614 PagedSpaces spaces;
1615 PagedSpace* original_space = spaces.next();
1616 while (original_space != NULL) {
1617 if (original_space->Contains(obj)) break;
1618 original_space = spaces.next();
1619 }
1620 ASSERT(original_space != NULL);
1621#endif
1622 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
1623 ASSERT(original_space->Contains(new_addr));
1624 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
1625 original_space->MCSpaceOffsetForAddress(old_addr));
1626 }
1627
1628 *p = HeapObject::FromAddress(new_addr);
1629
1630#ifdef DEBUG
1631 if (FLAG_gc_verbose) {
1632 PrintF("update %p : %p -> %p\n",
1633 reinterpret_cast<Address>(p), old_addr, new_addr);
1634 }
1635#endif
1636 }
1637};
1638
1639
1640void MarkCompactCollector::UpdatePointers() {
1641#ifdef DEBUG
1642 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1643 state_ = UPDATE_POINTERS;
1644#endif
1645 UpdatingVisitor updating_visitor;
1646  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
1647  GlobalHandles::IterateWeakRoots(&updating_visitor);
1648
1649 int live_maps = IterateLiveObjects(Heap::map_space(),
1650 &UpdatePointersInOldObject);
1651 int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
1652 &UpdatePointersInOldObject);
1653 int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
1654 &UpdatePointersInOldObject);
1655 int live_codes = IterateLiveObjects(Heap::code_space(),
1656 &UpdatePointersInOldObject);
1657 int live_cells = IterateLiveObjects(Heap::cell_space(),
1658 &UpdatePointersInOldObject);
1659 int live_news = IterateLiveObjects(Heap::new_space(),
1660 &UpdatePointersInNewObject);
1661
1662  // Large objects do not move, so the map word can be updated directly.
1663 LargeObjectIterator it(Heap::lo_space());
1664 while (it.has_next()) UpdatePointersInNewObject(it.next());
1665
1666 USE(live_maps);
1667 USE(live_pointer_olds);
1668 USE(live_data_olds);
1669 USE(live_codes);
1670 USE(live_cells);
1671 USE(live_news);
1672 ASSERT(live_maps == live_map_objects_);
1673 ASSERT(live_data_olds == live_old_data_objects_);
1674 ASSERT(live_pointer_olds == live_old_pointer_objects_);
1675 ASSERT(live_codes == live_code_objects_);
1676 ASSERT(live_cells == live_cell_objects_);
1677 ASSERT(live_news == live_young_objects_);
1678}
1679
1680
1681int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
1682  // Keep the old map pointer; it is still needed to compute the object size.
1683 Map* old_map = obj->map();
1684 ASSERT(old_map->IsHeapObject());
1685
1686 Address forwarded = GetForwardingAddressInOldSpace(old_map);
1687
1688 ASSERT(Heap::map_space()->Contains(old_map));
1689 ASSERT(Heap::map_space()->Contains(forwarded));
1690#ifdef DEBUG
1691 if (FLAG_gc_verbose) {
1692 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
1693 forwarded);
1694 }
1695#endif
1696 // Update the map pointer.
1697 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
1698
1699 // We have to compute the object size relying on the old map because
1700 // map objects are not relocated yet.
1701 int obj_size = obj->SizeFromMap(old_map);
1702
1703 // Update pointers in the object body.
1704 UpdatingVisitor updating_visitor;
1705 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
1706 return obj_size;
1707}
1708
1709
1710int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
1711 // Decode the map pointer.
1712 MapWord encoding = obj->map_word();
1713 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1714 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1715
1716  // At this point the first word at map_addr is itself still encoded, so we
1717  // cannot cast it to Map* using Map::cast.
1718 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
1719 int obj_size = obj->SizeFromMap(map);
1720 InstanceType type = map->instance_type();
1721
1722 // Update map pointer.
1723 Address new_map_addr = GetForwardingAddressInOldSpace(map);
1724 int offset = encoding.DecodeOffset();
1725 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
1726
1727#ifdef DEBUG
1728 if (FLAG_gc_verbose) {
1729 PrintF("update %p : %p -> %p\n", obj->address(),
1730 map_addr, new_map_addr);
1731 }
1732#endif
1733
1734 // Update pointers in the object body.
1735 UpdatingVisitor updating_visitor;
1736 obj->IterateBody(type, obj_size, &updating_visitor);
1737 return obj_size;
1738}
1739
1740
1741Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
1742  // The object should be in old space or map space.
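  // The map word of a live object in a paged space encodes its map's address
  // together with the offset of the object's forwarding address relative to
  // the forwarding address of the first live object on its page. That
  // forwarding address is reconstructed below, spilling into the next page
  // when the offset does not fit.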
1743 MapWord encoding = obj->map_word();
1744
1745  // Offset relative to the first live object's forwarding address.
1746 int offset = encoding.DecodeOffset();
1747 Address obj_addr = obj->address();
1748
1749 // Find the first live object's forwarding address.
1750 Page* p = Page::FromAddress(obj_addr);
1751 Address first_forwarded = p->mc_first_forwarded;
1752
1753  // Page containing first_forwarded and the offset within that page.
1754 Page* forwarded_page = Page::FromAddress(first_forwarded);
1755 int forwarded_offset = forwarded_page->Offset(first_forwarded);
1756
1757  // Find the end of allocation in the page of first_forwarded.
1758 Address mc_top = forwarded_page->mc_relocation_top;
1759 int mc_top_offset = forwarded_page->Offset(mc_top);
1760
1761  // Check whether the current object's forwarding address is in the same page
1762  // as the first live object's forwarding address.
1763 if (forwarded_offset + offset < mc_top_offset) {
1764 // In the same page.
1765 return first_forwarded + offset;
1766 }
1767
1768  // Otherwise it must be in the next page. NOTE: this may cross chunks.
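  // (A made-up example: with forwarded_offset 0x100, mc_top_offset 0x3f0 and
  // offset 0x320, first_forwarded + offset would run past the relocation top,
  // so the result becomes next_page->OffsetToAddress(Page::kObjectStartOffset
  // + 0x30).)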
1769 Page* next_page = forwarded_page->next_page();
1770 ASSERT(next_page->is_valid());
1771
1772 offset -= (mc_top_offset - forwarded_offset);
1773 offset += Page::kObjectStartOffset;
1774
1775 ASSERT_PAGE_OFFSET(offset);
1776 ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
1777
1778 return next_page->OffsetToAddress(offset);
1779}
1780
1781
1782// -------------------------------------------------------------------------
1783// Phase 4: Relocate objects
1784
1785void MarkCompactCollector::RelocateObjects() {
1786#ifdef DEBUG
1787 ASSERT(state_ == UPDATE_POINTERS);
1788 state_ = RELOCATE_OBJECTS;
1789#endif
1790  // Relocate objects, always relocating map objects first. Relocating
1791  // objects in the other spaces relies on map objects to get object sizes.
1792 int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
1793 int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
1794 &RelocateOldPointerObject);
1795 int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
1796 &RelocateOldDataObject);
1797 int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
1798 int live_cells = IterateLiveObjects(Heap::cell_space(), &RelocateCellObject);
1799 int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);
1800
1801 USE(live_maps);
1802 USE(live_data_olds);
1803 USE(live_pointer_olds);
1804 USE(live_codes);
1805 USE(live_cells);
1806 USE(live_news);
1807 ASSERT(live_maps == live_map_objects_);
1808 ASSERT(live_data_olds == live_old_data_objects_);
1809 ASSERT(live_pointer_olds == live_old_pointer_objects_);
1810 ASSERT(live_codes == live_code_objects_);
1811 ASSERT(live_cells == live_cell_objects_);
1812 ASSERT(live_news == live_young_objects_);
1813
1814  // Flip the from and to spaces.
1815 Heap::new_space()->Flip();
1816
1817  // Set the age mark to the bottom of the to space.
1818 Address mark = Heap::new_space()->bottom();
1819 Heap::new_space()->set_age_mark(mark);
1820
1821 Heap::new_space()->MCCommitRelocationInfo();
1822#ifdef DEBUG
1823 // It is safe to write to the remembered sets as remembered sets on a
1824 // page-by-page basis after committing the m-c forwarding pointer.
1825 Page::set_rset_state(Page::IN_USE);
1826#endif
1827 PagedSpaces spaces;
1828 while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
1829}
1830
1831
1832int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
1833 // Recover map pointer.
1834 MapWord encoding = obj->map_word();
1835 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1836 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1837
1838 // Get forwarding address before resetting map pointer
1839 Address new_addr = GetForwardingAddressInOldSpace(obj);
1840
1841 // Reset map pointer. The meta map object may not be copied yet so
1842 // Map::cast does not yet work.
1843 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
1844
1845 Address old_addr = obj->address();
1846
1847 if (new_addr != old_addr) {
1848 memmove(new_addr, old_addr, Map::kSize); // copy contents
1849 }
1850
1851#ifdef DEBUG
1852 if (FLAG_gc_verbose) {
1853 PrintF("relocate %p -> %p\n", old_addr, new_addr);
1854 }
1855#endif
1856
1857 return Map::kSize;
1858}
1859
1860
1861static inline int RestoreMap(HeapObject* obj,
1862 PagedSpace* space,
1863 Address new_addr,
1864 Address map_addr) {
1865 // This must be a non-map object, and the function relies on the
1866 // assumption that the Map space is compacted before the other paged
1867 // spaces (see RelocateObjects).
1868
1869 // Reset map pointer.
1870 obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
1871
1872 int obj_size = obj->Size();
1873 ASSERT_OBJECT_SIZE(obj_size);
1874
1875 ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
1876 space->MCSpaceOffsetForAddress(obj->address()));
1877
1878#ifdef DEBUG
1879 if (FLAG_gc_verbose) {
1880 PrintF("relocate %p -> %p\n", obj->address(), new_addr);
1881 }
1882#endif
1883
1884 return obj_size;
1885}
1886
1887
1888int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
1889 PagedSpace* space) {
1890 // Recover map pointer.
1891 MapWord encoding = obj->map_word();
1892 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1893 ASSERT(Heap::map_space()->Contains(map_addr));
1894
1895 // Get forwarding address before resetting map pointer.
1896 Address new_addr = GetForwardingAddressInOldSpace(obj);
1897
1898 // Reset the map pointer.
1899 int obj_size = RestoreMap(obj, space, new_addr, map_addr);
1900
1901 Address old_addr = obj->address();
1902
1903 if (new_addr != old_addr) {
1904 memmove(new_addr, old_addr, obj_size); // Copy contents
1905 }
1906
1907 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
1908
1909 return obj_size;
1910}
1911
1912
1913int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
1914 return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
1915}
1916
1917
1918int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
1919 return RelocateOldNonCodeObject(obj, Heap::old_data_space());
1920}
1921
1922
1923int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
1924 return RelocateOldNonCodeObject(obj, Heap::cell_space());
1925}
1926
1927
1928int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
1929 // Recover map pointer.
1930 MapWord encoding = obj->map_word();
1931 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
1932 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
1933
1934 // Get forwarding address before resetting map pointer
1935 Address new_addr = GetForwardingAddressInOldSpace(obj);
1936
1937 // Reset the map pointer.
1938 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
1939
1940 Address old_addr = obj->address();
1941
1942 if (new_addr != old_addr) {
1943 memmove(new_addr, old_addr, obj_size); // Copy contents.
1944 }
1945
1946 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
1947 if (copied_to->IsCode()) {
1948 // May also update inline cache target.
1949 Code::cast(copied_to)->Relocate(new_addr - old_addr);
1950 // Notify the logger that compiled code has moved.
1951 LOG(CodeMoveEvent(old_addr, new_addr));
1952 }
1953
1954 return obj_size;
1955}
1956
1957
1958int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
1959 int obj_size = obj->Size();
1960
1961 // Get forwarding address
1962 Address old_addr = obj->address();
1963 int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
1964
1965 Address new_addr =
1966 Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
1967
1968#ifdef DEBUG
1969 if (Heap::new_space()->FromSpaceContains(new_addr)) {
1970 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
1971 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
1972 } else {
1973 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
1974 Heap::TargetSpace(obj) == Heap::old_data_space());
1975 }
1976#endif
1977
1978 // New and old addresses cannot overlap.
1979 memcpy(reinterpret_cast<void*>(new_addr),
1980 reinterpret_cast<void*>(old_addr),
1981 obj_size);
1982
1983#ifdef DEBUG
1984 if (FLAG_gc_verbose) {
1985 PrintF("relocate %p -> %p\n", old_addr, new_addr);
1986 }
1987#endif
1988
1989 return obj_size;
1990}
1991
1992
1993// -------------------------------------------------------------------------
1994// Phase 5: rebuild remembered sets
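// Compaction has moved objects and so invalidated the page remembered sets;
// Heap::RebuildRSets reconstructs them (presumably by rescanning the relocated
// objects for pointers into new space).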
1995
1996void MarkCompactCollector::RebuildRSets() {
1997#ifdef DEBUG
1998 ASSERT(state_ == RELOCATE_OBJECTS);
1999 state_ = REBUILD_RSETS;
2000#endif
2001 Heap::RebuildRSets();
2002}
2003
2004} } // namespace v8::internal