// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();
  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
    compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
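  // Worked example (illustrative): with 2 MB recoverable out of 10 MB used,
  // old_gen_fragmentation is 20, which exceeds kFragmentationLimit (15), and
  // 2 MB also exceeds kFragmentationAllowed (1 MB), so compaction is
  // scheduled for the next GC.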
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
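//
// In tri-color terms: unmarked objects are white; objects that have been
// reached and marked but whose children are not yet visited (those on the
// marking stack, or flagged as overflowed) are gray; marked objects whose
// children have all been visited are black. The object-group code below
// reuses this gray/white vocabulary.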

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
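  //
  // Illustrative example: for a cons string whose second() is the empty
  // string, e.g. the result of flattening cons("foo", ""), *p is updated to
  // point directly at the left substring "foo".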
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


// Helper class for marking pointers in HeapObjects.
class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

 private:
  // Mark object pointed to by p.
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Tells whether the mark sweep collection will perform compaction.
  bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }

  // Visit an unmarked object.
  void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }
};


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  MarkingVisitor* stack_visitor() { return &stack_visitor_; }

 private:
  MarkingVisitor stack_visitor_;

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);
    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        &stack_visitor_);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  MarkingVisitor visitor;  // Has no state or contents.
  visitor.VisitPointers(HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset),
                        HeapObject::RawField(map,
                                             Map::kPointerFieldsEndOffset));
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say
  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // or NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // type MAP_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack(&marker);
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


585
586// Mark all objects reachable from the objects on the marking stack.
587// Before: the marking stack contains zero or more heap object pointers.
588// After: the marking stack is empty, and all objects reachable from the
589// marking stack have been marked, or are overflowed in the heap.
590void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
591 while (!marking_stack.is_empty()) {
592 HeapObject* object = marking_stack.Pop();
593 ASSERT(object->IsHeapObject());
594 ASSERT(Heap::Contains(object));
595 ASSERT(object->IsMarked());
596 ASSERT(!object->IsOverflowed());
597
598 // Because the object is marked, we have to recover the original map
599 // pointer and use it to mark the object's body.
600 MapWord map_word = object->map_word();
601 map_word.ClearMark();
602 Map* map = map_word.ToMap();
603 MarkObject(map);
604 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
605 visitor);
606 }
607}
608
609
// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
  EmptyMarkingStack(visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor);
  }
}


void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack(visitor);
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
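
// Illustrative example (assuming kIntSize == 4): EncodeFreeRegion below
// encodes a 12-byte free region starting at address a as
//   Memory::uint32_at(a) = kMultiFreeEncoding;
//   Memory::int_at(a + kIntSize) = 12;
// so a subsequent sweep can read the size and skip straight to a + 12. A
// 4-byte region gets kSingleFreeEncoding alone, since the next word is
// already the start of a live object.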


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}


// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
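// For example, an object at Heap::new_space()->ToSpaceLow() + 0x40 gets its
// forwarding address written at FromSpaceLow() + 0x40.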
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
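// Illustrative example: if a page's first two live objects occupy 24 and 40
// bytes, the third live object is encoded with offset 64, and relocation
// later recovers its new address from the page's mc_first_forwarded plus the
// decoded offset.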
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (e.g., a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly
// into the first word of the object without any encoding. If an object is
// dead, NULL is written as its forwarding address.
// The second pass updates pointers to new space in all spaces. It is
// possible to encounter pointers to dead objects during traversal of dirty
// regions; we should clear them to avoid encountering them again during the
// next dirty-regions iteration.
static void MigrateObject(Address dst,
                          Address src,
                          int size,
                          bool to_old_space) {
  if (to_old_space) {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
  } else {
    Heap::CopyBlock(dst, src, size);
  }

  Memory::Address_at(src) = dst;
}
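
// After MigrateObject(dst, src, size, to_old_space) the first word at src
// holds dst, so the second sweep pass can translate a stale pointer p via
// Memory::Address_at(p->address()). Dead objects get NULL in that word
// instead (see SweepNewSpace below).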


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (Heap::new_space()->Contains(obj)) {
      ASSERT(Heap::InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
  if (!(*p)->IsHeapObject()) return;

  Address old_addr = (*p)->address();
  ASSERT(Heap::InFromSpace(*p));

  Address new_addr = Memory::Address_at(old_addr);

  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next iteration of dirty regions.
    *p = NULL;
  } else {
    *p = HeapObject::FromAddress(new_addr);
  }
}


static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  Address old_addr = HeapObject::cast(*p)->address();
  Address new_addr = Memory::Address_at(old_addr);
  return String::cast(HeapObject::FromAddress(new_addr));
}


static bool TryPromoteObject(HeapObject* object, int object_size) {
  Object* result;

  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(), object->address(), object_size, true);
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);

    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space == Heap::old_pointer_space());
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}


static void SweepNewSpace(NewSpace* space) {
  Heap::CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in the inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate the object to the other semispace.
      Object* target = space->AllocateRaw(size);

      // Allocation cannot fail at this point: semispaces are of equal size.
      ASSERT(!target->IsFailure());

      MigrateObject(HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor;

  // Update pointers in to space.
  HeapObject* object;
  for (Address current = space->bottom();
       current < space->top();
       current += object->Size()) {
    object = HeapObject::FromAddress(current);

    object->IterateBody(object->map()->instance_type(),
                        object->Size(),
                        &updating_visitor);
  }

  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(Heap::cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointers from the external string table.
  Heap::UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
}


static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  // During sweeping of paged space we are trying to find the longest
  // sequences of pages without live objects and free them (instead of
  // putting them on the free list).

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
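  //
  // Illustrative example: if page N ends with 128 dead bytes and every later
  // page is empty, the allocation top is simply moved back and neither the
  // 128 bytes nor the empty pages go through the free list; but if a later
  // page holds a live object, the deferred 128-byte area is handed to
  // dealloc() and put on the free list.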
1318 Address last_free_start = NULL;
1319 int last_free_size = 0;
1320
Steve Blocka7e24c12009-10-30 11:49:00 +00001321 while (it.has_next()) {
1322 Page* p = it.next();
1323
1324 bool is_previous_alive = true;
1325 Address free_start = NULL;
1326 HeapObject* object;
1327
1328 for (Address current = p->ObjectAreaStart();
1329 current < p->AllocationTop();
1330 current += object->Size()) {
1331 object = HeapObject::FromAddress(current);
1332 if (object->IsMarked()) {
1333 object->ClearMark();
1334 MarkCompactCollector::tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01001335
Steve Blocka7e24c12009-10-30 11:49:00 +00001336 if (!is_previous_alive) { // Transition from free to live.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001337 dealloc(free_start,
1338 static_cast<int>(current - free_start),
1339 true,
1340 false);
Steve Blocka7e24c12009-10-30 11:49:00 +00001341 is_previous_alive = true;
1342 }
1343 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00001344 MarkCompactCollector::ReportDeleteIfNeeded(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001345 if (is_previous_alive) { // Transition from live to free.
1346 free_start = current;
1347 is_previous_alive = false;
1348 }
1349 }
1350 // The object is now unmarked for the call to Size() at the top of the
1351 // loop.
1352 }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page. Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          dealloc(free_start, size_in_bytes, false, true);
        }
      }
    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we
      // deallocate that area and put it on the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        dealloc(last_free_start, last_free_size, true, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space. See if we need to adjust allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
    dealloc(last_free_start, last_free_size, false, true);
    new_allocation_top = last_free_start;
  }

  if (new_allocation_top != NULL) {
#ifdef DEBUG
    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
    if (!first_empty_page->is_valid()) {
      ASSERT(new_allocation_top_page == space->AllocationTopPage());
    } else if (last_free_size > 0) {
      ASSERT(new_allocation_top_page == prec_first_empty_page);
    } else {
      ASSERT(new_allocation_top_page == first_empty_page);
    }
#endif

    space->SetTop(new_allocation_top);
  }
}

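// The Deallocate*Block functions below all match the dealloc callback
// signature that SweepSpace expects. Judging from the call sites above,
// add_to_freelist asks for the block to become reusable free-list space,
// and last_on_page is true only when the freed block extends to the page's
// allocation top; none of the callbacks below consult last_on_page.
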
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes,
                                                     bool add_to_freelist,
                                                     bool last_on_page) {
  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes,
                                                  bool add_to_freelist,
                                                  bool last_on_page) {
  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes,
                                              bool add_to_freelist,
                                              bool last_on_page) {
  // Objects in map space are assumed to have size Map::kSize and a
  // valid map in their first word. Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a, add_to_freelist);
  }
}


void MarkCompactCollector::DeallocateCellBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  // Free-list elements in cell space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = Heap::cell_space()->object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Heap::cell_space()->Free(a, add_to_freelist);
  }
}
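
// A worked example of the chunking above, with illustrative numbers only: if
// the cell size is 8 bytes, a 24-byte free block starting at address A is
// handed back as three separate free-list nodes at A, A + 8 and A + 16.
// Freeing per-chunk preserves the invariant that every free-list element in
// cell space has the fixed cell size (DeallocateMapBlock above does the
// analogous chunking with Map::kSize).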


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());


  // Compute new space next to last, after the old and code spaces have been
  // compacted. Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}
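
// A sketch of the encoding scheme above, inferred from MapWord's
// DecodeMapAddress/DecodeOffset and from GetForwardingAddressInOldSpace
// below: instead of storing a full forwarding pointer in every object, the
// map word is packed with the location of the object's map plus a small
// offset. Each page records the forwarding address of its first live object
// in mc_first_forwarded, so an object's own forwarding address can be
// recomputed on demand, roughly as
//
//   forwarded = p->mc_first_forwarded + map_word.DecodeOffset();
//
// with a spill into the next page when the sum passes the allocation
// watermark of the page containing mc_first_forwarded.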


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};
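
// MapIterator reports Map::kSize for every object it visits, live map or
// free-list node alike: map space is laid out as a sequence of fixed-size
// cells, so the iterator can step through a page without consulting map
// words that compaction may already have rewritten.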


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
    : live_maps_(live_maps),
      to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
      map_to_evacuate_it_(to_evacuate_start_),
      first_map_to_evacuate_(
          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }

  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word. They are the only
      // objects whose map word is overflowed, as marking is already complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
                                                  map_to_evacuate->address(),
                                                  Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }
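
  // The forwarding trick above in brief: once the map's bytes are copied to
  // the vacant slot, the evacuated map's own map word is overwritten with a
  // pointer to the new location, tagged with the overflow bit. Marking has
  // finished, so the overflow bit is free for reuse, and MapUpdatingVisitor
  // treats it as "this map has moved". An illustrative sequence, assuming a
  // vacant slot at 0x1000 and a live map at 0x2000:
  //
  //   EvacuateMap(vacant /* at 0x1000 */, live /* at 0x2000 */);
  //   // live->map_word() now encodes 0x1000 with the overflow bit set,
  //   // so GetForwardedMap(live->map_word()) yields the map at 0x1000.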

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
  SweepNewSpace(Heap::new_space());
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);

  Heap::IterateDirtyRegions(Heap::map_space(),
                            &Heap::IteratePointersInDirtyMapsRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  int live_maps_size = Heap::map_space()->Size();
  int live_maps = live_maps_size / Map::kSize;
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace). Non-live regions in the range are encoded in place (see
// kSingleFreeEncoding and kMultiFreeEncoding) and are skipped. The callback
// function is used to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = size_func(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
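
// How the skipping above works: each free region carries an in-place
// encoding in its first word. kSingleFreeEncoding marks a hole of one
// pointer size; kMultiFreeEncoding marks a larger hole whose byte length
// sits in the following word (hence Memory::int_at(current + kIntSize)). A
// walk-through with made-up sizes: for a range laid out as
//
//   [live, 24 bytes][single free][live, 16 bytes][multi free, 40 bytes]
//
// current advances by 24, kPointerSize, 16 and 40 in turn, and only the two
// live objects contribute to live_objects_size.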


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};
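
// For a new-space object, UpdatePointer reads the forwarding pointer from
// the from-space word that mirrors the object's to-space offset
// (FromSpaceLow() + ToSpaceOffsetForAddress(old_addr)). The semispaces have
// identical layouts, so the inactive one is free to hold relocation info,
// apparently as set up by EncodeForwardingAddressesInNewSpace.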


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &UpdatePointersInNewObject);

  // Large objects do not move; the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    UpdatePointersInNewObject(obj);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, so we cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}
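
// Note the asymmetry with UpdatePointersInNewObject above: an old-space
// object still carries an encoded map word at this point, so its map
// reference is updated by re-encoding (MapWord::EncodeAddress) with the
// map's forwarding address while the object's own forwarding offset is
// preserved, rather than by a plain set_map.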


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of the forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding address is on the same page
  // as the first live object's forwarding address.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // It must be in the next page; NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
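
// A worked example with illustrative numbers: say the first live object on
// obj's page forwards to an address at offset 0x800 of its target page, that
// page's allocation watermark is at offset 0xF00, and obj's encoded offset
// is 0x780. Since 0x800 + 0x780 = 0xF80 >= 0xF00, the forwarding address
// spills into the next page, at offset
// 0x780 - (0xF00 - 0x800) + Page::kObjectStartOffset.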


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects, always relocating map objects first. Relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &RelocateCellObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip the from and to spaces.
  Heap::new_space()->Flip();

  Heap::new_space()->MCCommitRelocationInfo();

  // Set age_mark to the bottom of to-space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  Heap::CheckNewSpaceExpansionCriteria();
  Heap::IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer. The meta map object may not be copied yet, so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset the map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
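
// A note on the branch above: old data space holds no pointers into new
// space, so a plain Heap::MoveBlock suffices there; the pointer-carrying
// spaces go through MoveBlockToOldSpaceAndUpdateRegionMarks so that the
// dirty-region marks used by the write barrier move with the object.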


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update the inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    PROFILE(FunctionDeleteEvent(obj->address()));
  }
#endif
}

} } // namespace v8::internal