// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "execution.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();
  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;  // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}
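
// For example (illustrative numbers, not measured data): with 20 MB of old
// generation in use and 4 MB recoverable, old_gen_fragmentation is 20.
// Since 20 > kFragmentationLimit and 4 MB > kFragmentationAllowed,
// compact_on_next_gc_ is set and the next collection will compact.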


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
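//
// In outline, the driver loop (see ProcessMarkingStack below) is:
//
//   EmptyMarkingStack(visitor);
//   while (marking_stack.overflowed()) {
//     RefillMarkingStack();       // Scan the heap for overflowed objects.
//     EmptyMarkingStack(visitor);
//   }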

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


// Helper class for marking pointers in HeapObjects.
class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

 private:
  // Mark object pointed to by p.
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Tells whether the mark sweep collection will perform compaction.
  bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }

  // Visit an unmarked object.
  void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }
};


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  MarkingVisitor* stack_visitor() { return &stack_visitor_; }

 private:
  MarkingVisitor stack_visitor_;

  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);
    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        &stack_visitor_);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  MarkingVisitor visitor;  // Has no state or contents.
  visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
                        HeapObject::RawField(map, Map::kSize));
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say
  // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // or NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // type MAP_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack(&marker);
}
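
// Note that MarkSymbolTable() marks only the table itself and its prefix
// strongly; the symbol entries are treated as weak and are pruned later by
// SymbolTableCleaner in MarkLiveObjects().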


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        visitor);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
  EmptyMarkingStack(visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor);
  }
}


void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack(visitor);
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}
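
// In other words: after CreateBackPointers(), each transition target's
// prototype field temporarily points back at its parent map, so following
// prototype fields from any map climbs the transition tree until a non-map
// (the real prototype) is reached. Both loops above rely on this invariant.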

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
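
// The resulting layout of an encoded free region is (see EncodeFreeRegion
// below):
//
//   one-word region:  [kSingleFreeEncoding]
//   larger region:    [kMultiFreeEncoding][free_size][...zapped in debug...]
//
// Both encodings are invalid as map page offsets, so a sweep can distinguish
// free regions from live objects by inspecting the first word.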


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}


// Try to promote all objects in new space: heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}

// Function template that, given a range of addresses (e.g., a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}
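
// For illustration, sweeping the sequence L L D D D L D (L = live, D =
// dead): the run D D D is encoded as a single free region when the
// following live object is reached, and the trailing D is encoded by the
// final check after the loop.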


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly
// into the first word of the object without any encoding. If the object is
// dead, we write NULL as the forwarding address.
//
// The second pass updates pointers to new space in all spaces. It is
// possible to encounter pointers to dead objects while traversing dirty
// regions; we clear them so that they are not encountered again during the
// next dirty-region iteration.
static void MigrateObject(Address dst,
                          Address src,
                          int size,
                          bool to_old_space) {
  if (to_old_space) {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
  } else {
    Heap::CopyBlock(dst, src, size);
  }

  Memory::Address_at(src) = dst;
}
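
// After MigrateObject() the forwarding address of a migrated object can be
// read back from its old location:
//
//   HeapObject* new_obj = HeapObject::FromAddress(Memory::Address_at(src));
//
// This is exactly what the pointer-updating code below relies on.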


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (Heap::new_space()->Contains(obj)) {
      ASSERT(Heap::InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};

// Updates a pointer from a live object in an old space to new space. Unlike
// the visitor above, it can encounter pointers to dead objects in new space
// when traversing map space (see the comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
  if (!(*p)->IsHeapObject()) return;

  Address old_addr = (*p)->address();
  ASSERT(Heap::InFromSpace(*p));

  Address new_addr = Memory::Address_at(old_addr);

  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next iteration over the dirty regions.
    *p = NULL;
  } else {
    *p = HeapObject::FromAddress(new_addr);
  }
}


static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  Address old_addr = HeapObject::cast(*p)->address();
  Address new_addr = Memory::Address_at(old_addr);
  return String::cast(HeapObject::FromAddress(new_addr));
}


static bool TryPromoteObject(HeapObject* object, int object_size) {
  Object* result;

  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(), object->address(), object_size, true);
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);

    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space == Heap::old_pointer_space());
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}
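
// Note the last argument passed to MigrateObject() above: copies into spaces
// that may hold pointers to new space (large object space and old pointer
// space) go through Heap::CopyBlockToOldSpaceAndUpdateRegionMarks() so the
// dirty region marks stay valid, while copies into old data space use the
// plain Heap::CopyBlock() path.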


static void SweepNewSpace(NewSpace* space) {
  Heap::CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in the inactive semispace, remove
  // marks, migrate live objects, and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate the object to the other semispace.
      Object* target = space->AllocateRaw(size);

      // Allocation cannot fail at this point: semispaces are of equal size.
      ASSERT(!target->IsFailure());

      MigrateObject(HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor;

  // Update pointers in to space.
  HeapObject* object;
  for (Address current = space->bottom();
       current < space->top();
       current += object->Size()) {
    object = HeapObject::FromAddress(current);

    object->IterateBody(object->map()->instance_type(),
                        object->Size(),
                        &updating_visitor);
  }

  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(Heap::cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointers from the external string table.
  Heap::UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
}


static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  // During sweeping of paged space we are trying to find the longest
  // sequences of pages without live objects and free them (instead of
  // putting them on the free list).

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
  Address last_free_start = NULL;
  int last_free_size = 0;

  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();

        if (!is_previous_alive) {  // Transition from free to live.
          dealloc(free_start,
                  static_cast<int>(current - free_start),
                  true,
                  false);
          is_previous_alive = true;
        }
      } else {
        MarkCompactCollector::ReportDeleteIfNeeded(object);
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of
      // a sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page. Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          dealloc(free_start, size_in_bytes, false, true);
        }
      }
    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        dealloc(last_free_start, last_free_size, true, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space. See if we need to adjust the
  // allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001414 dealloc(last_free_start, last_free_size, false, true);
Steve Block6ded16b2010-05-10 14:33:55 +01001415 new_allocation_top = last_free_start;
1416 }
1417
1418 if (new_allocation_top != NULL) {
1419#ifdef DEBUG
1420 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
1421 if (!first_empty_page->is_valid()) {
1422 ASSERT(new_allocation_top_page == space->AllocationTopPage());
1423 } else if (last_free_size > 0) {
1424 ASSERT(new_allocation_top_page == prec_first_empty_page);
1425 } else {
1426 ASSERT(new_allocation_top_page == first_empty_page);
1427 }
1428#endif
1429
1430 space->SetTop(new_allocation_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001431 }
1432}
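
// Illustrative sketch of the sweep above; the page layout is hypothetical.
// Suppose the space ends with pages P1..P3 and the liveness pattern is:
//
//   P1: [ live objects | dead... ]  <- trailing free region remembered in
//                                      last_free_start / last_free_size
//   P2: [ dead...               ]   <- empty; starts a run
//                                      (first_empty_page = P2,
//                                       prec_first_empty_page = P1)
//   P3: [ dead...               ]   <- empty; run continues up to the
//                                      allocation top page
//
// Because the run of empty pages reaches the end of the space, it is never
// handed to space->FreePages(); instead both adjustments above fire and the
// allocation top ends up at last_free_start on P1, with P1's trailing free
// region deallocated without entering the free list.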


void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes,
                                                     bool add_to_freelist,
                                                     bool last_on_page) {
  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes,
                                                  bool add_to_freelist,
                                                  bool last_on_page) {
  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes,
                                              bool add_to_freelist,
                                              bool last_on_page) {
  // Objects in map space are assumed to have size Map::kSize and a
  // valid map in their first word. Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a, add_to_freelist);
  }
}


void MarkCompactCollector::DeallocateCellBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  // Free-list elements in cell space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = Heap::cell_space()->object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Heap::cell_space()->Free(a, add_to_freelist);
  }
}
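
// Note on the family above: every Deallocate*Block function matches the
// dealloc callback signature taken by SweepSpace (start address, size in
// bytes, add_to_freelist, last_on_page). Old pointer, old data, and code
// space free a block in one call, while map and cell space must split a
// block into fixed-size chunks because their free lists only hold nodes of
// a single size.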


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute the new space next to last, after the old and code spaces have
  // been compacted. Objects in new space can be promoted to old or code
  // space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}
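
// A note on the encoding produced above: after this phase a live object's
// map word no longer holds a plain map pointer. It packs the address of the
// object's map (compactly, since maps live only in map space) together with
// the offset of the object's forwarding address relative to the first live
// object's forwarding address on its page. UpdatePointersInOldObject and
// GetForwardingAddressInOldSpace below decode this encoding again.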


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};
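
// Map space contains only objects of size Map::kSize (live maps and the
// free-list nodes left by the sweeper), so MapIterator can stride by a
// constant without consulting map words, which map compaction below is in
// the middle of rewriting.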


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
      : live_maps_(live_maps),
        to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
        map_to_evacuate_it_(to_evacuate_start_),
        first_map_to_evacuate_(
            reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }
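
  // Illustrative picture of CompactMaps() on a map space with six slots,
  // where L is a live map, F a free-list node, and live_maps_ == 4:
  //
  //   before:  [ L  F  L  L | L  F ]
  //                          ^ to_evacuate_start_
  //   after:   [ L  L' L  L | fwd  F ]
  //
  // NextVacantMap() finds F slots below to_evacuate_start_,
  // NextMapToEvacuate() finds L slots at or above it, and EvacuateMap()
  // copies each such map into a vacant slot (L'), leaving an
  // overflow-tagged forwarding map word (fwd) in the old slot.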

  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word. They are the
      // only objects whose map word is overflowed, as marking is already
      // complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
                                                  map_to_evacuate->address(),
                                                  Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to the block copy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};
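
// Typical driving sequence (see SweepSpaces below): construct a MapCompact
// with the live map count, call CompactMaps(), then the
// UpdateMapPointersIn*() family for the roots and for every space other
// than map space, and finally Finish() to complete the compaction of map
// space.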

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
  SweepNewSpace(Heap::new_space());
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);

  Heap::IterateDirtyRegions(Heap::map_space(),
                            &Heap::IteratePointersInDirtyMapsRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  int live_maps_size = Heap::map_space()->Size();
  int live_maps = live_maps_size / Map::kSize;
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace), skipping the free regions encoded in place (see
// kSingleFreeEncoding and kMultiFreeEncoding). The callback function is
// used to get the size of each live object. Returns the total size of the
// live objects visited.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = size_func(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
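
// Sketch of the free-region encodings consumed above; the numbers are
// illustrative only. A single free pointer-sized word at address a is
// tagged in place as
//
//   Memory::uint32_at(a) == kSingleFreeEncoding    // skip kPointerSize
//
// while a larger free region additionally records its byte size:
//
//   Memory::uint32_at(a) == kMultiFreeEncoding
//   Memory::int_at(a + kIntSize) == 24             // skip 24 bytes
//
// Any other first word is taken to be the map slot of a live object whose
// size size_func reports.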


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};
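
// Where UpdatePointer() finds the forwarding addresses: an object still in
// new space keeps its forwarding pointer in from-space, mirrored at the
// same offset the object has in to-space; large objects never move; all
// other objects decode their destination from the map-word encoding (see
// GetForwardingAddressInOldSpace below).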


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &UpdatePointersInNewObject);

  // Large objects do not move; the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    UpdatePointersInNewObject(obj);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, so we
  // cannot cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page containing the forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check whether the current object's forwarding pointer is on the same
  // page as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page. NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
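
// Worked example for the spill-over case above; all numbers are
// hypothetical. Suppose the first live object of this object's page
// forwards to an address with forwarded_offset 0x100 on forwarded_page,
// the allocation watermark of that page is at offset 0x800, and this
// object's decoded offset is 0x7a0. Then 0x100 + 0x7a0 >= 0x800, so the
// forwarding address lies on the next page at offset
// 0x7a0 - (0x800 - 0x100) + Page::kObjectStartOffset =
// 0xa0 + Page::kObjectStartOffset.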


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects, map objects first. Relocating objects in the other
  // spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &RelocateCellObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip the from and to spaces.
  Heap::new_space()->Flip();

  Heap::new_space()->MCCommitRelocationInfo();

  // Set the age mark to the bottom of to-space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  Heap::CheckNewSpaceExpansionCriteria();
  Heap::IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer. The meta map object may not be copied yet, so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset the map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update the inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}
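
// A new-space object is either relocated within new space (its forwarding
// address lies in what is from-space here and becomes to-space after the
// Flip() in RelocateObjects) or is promoted, in which case the DEBUG check
// above confirms that Heap::TargetSpace() chose old pointer space or old
// data space for it.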


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    PROFILE(FunctionDeleteEvent(obj->address()));
  }
#endif
}

} }  // namespace v8::internal