blob: 94cf315a865d4a38106870ca4bd5219b6df08415 [file] [log] [blame]
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001// Copyright 2006-2008 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
30#include "execution.h"
31#include "global-handles.h"
32#include "ic-inl.h"
33#include "mark-compact.h"
34#include "stub-cache.h"
35
36namespace v8 { namespace internal {
37
ager@chromium.orgddb913d2009-01-27 10:01:48 +000038// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000039// MarkCompactCollector
40
// Whether the current collection compacts the old generation; chosen in
// Prepare() and consumed by CollectGarbage().
bool MarkCompactCollector::compacting_collection_ = false;

// Count of objects left marked at the end of the last collection
// (recorded at the end of CollectGarbage()).
int MarkCompactCollector::previous_marked_count_ = 0;
// Tracer for the collection in progress; set in Prepare() and nulled out
// at the end of CollectGarbage().
GCTracer* MarkCompactCollector::tracer_ = NULL;
45
46
#ifdef DEBUG
// Current phase of the collector; checked by ASSERTs throughout this file.
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.  They are reset in Prepare().
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_ = 0;
int MarkCompactCollector::live_old_data_objects_ = 0;
int MarkCompactCollector::live_old_pointer_objects_ = 0;
int MarkCompactCollector::live_code_objects_ = 0;
int MarkCompactCollector::live_map_objects_ = 0;
int MarkCompactCollector::live_lo_objects_ = 0;
#endif
60
// Top-level driver for a full mark-compact (or mark-sweep) collection.
// Prepare() must have been called beforehand; this runs marking, the
// chosen sweeping or compacting phases, and then Finish().
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  // Phase 1: mark all reachable objects.
  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (compacting_collection_) {
    // Compacting path: compute forwarding addresses, fix up pointers,
    // move the objects, and rebuild the remembered sets.
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();

    RebuildRSets();

  } else {
    // Non-compacting path: sweep the paged spaces in place.
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}
97
98
kasperl@chromium.org061ef742009-02-27 12:16:20 +000099void MarkCompactCollector::Prepare(GCTracer* tracer) {
100 // Rather than passing the tracer around we stash it in a static member
101 // variable.
102 tracer_ = tracer;
103
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000104 static const int kFragmentationLimit = 50; // Percent.
105#ifdef DEBUG
106 ASSERT(state_ == IDLE);
107 state_ = PREPARE_GC;
108#endif
109 ASSERT(!FLAG_always_compact || !FLAG_never_compact);
110
111 compacting_collection_ = FLAG_always_compact;
112
113 // We compact the old generation if it gets too fragmented (ie, we could
114 // recover an expected amount of space by reclaiming the waste and free
115 // list blocks). We always compact when the flag --gc-global is true
116 // because objects do not get promoted out of new space on non-compacting
117 // GCs.
118 if (!compacting_collection_) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000119 int old_gen_recoverable = 0;
120 int old_gen_used = 0;
121
122 OldSpaces spaces;
123 while (OldSpace* space = spaces.next()) {
124 old_gen_recoverable += space->Waste() + space->AvailableFree();
125 old_gen_used += space->Size();
126 }
127 int old_gen_fragmentation =
128 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000129 if (old_gen_fragmentation > kFragmentationLimit) {
130 compacting_collection_ = true;
131 }
132 }
133
134 if (FLAG_never_compact) compacting_collection_ = false;
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000135 if (FLAG_collect_maps) CreateBackPointers();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000136
137#ifdef DEBUG
138 if (compacting_collection_) {
139 // We will write bookkeeping information to the remembered set area
140 // starting now.
141 Page::set_rset_state(Page::NOT_IN_USE);
142 }
143#endif
144
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000145 PagedSpaces spaces;
146 while (PagedSpace* space = spaces.next()) {
147 space->PrepareForMarkCompact(compacting_collection_);
148 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000149
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000150#ifdef DEBUG
151 live_bytes_ = 0;
152 live_young_objects_ = 0;
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000153 live_old_pointer_objects_ = 0;
154 live_old_data_objects_ = 0;
155 live_code_objects_ = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000156 live_map_objects_ = 0;
157 live_lo_objects_ = 0;
158#endif
159}
160
161
// Return the collector to the IDLE state and clear caches that were not
// traversed during the collection.
void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();
}
173
174
ager@chromium.orgddb913d2009-01-27 10:01:48 +0000175// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000176// Phase 1: tracing and marking live objects.
177// before: all objects are in normal state.
178// after: a live object's map pointer is marked as '00'.
179
180// Marking all live objects in the heap as part of mark-sweep or mark-compact
181// collection. Before marking, all objects are in their normal state. After
182// marking, live objects' map pointers are marked indicating that the object
183// has been found reachable.
184//
185// The marking algorithm is a (mostly) depth-first (because of possible stack
186// overflow) traversal of the graph of objects reachable from the roots. It
187// uses an explicit stack of pointers rather than recursion. The young
188// generation's inactive ('from') space is used as a marking stack. The
189// objects in the marking stack are the ones that have been reached and marked
190// but their children have not yet been visited.
191//
192// The marking stack can overflow during traversal. In that case, we set an
193// overflow flag. When the overflow flag is set, we continue marking objects
194// reachable from the objects on the marking stack, but no longer push them on
195// the marking stack. Instead, we mark them as both marked and overflowed.
196// When the stack is in the overflowed state, objects marked as overflowed
197// have been reached and marked but their children have not been visited yet.
198// After emptying the marking stack, we clear the overflow flag and traverse
199// the heap looking for objects marked as overflowed, push them on the stack,
200// and continue with marking. This process repeats until all reachable
201// objects have been marked.
202
// Explicit stack of gray objects used by the depth-first marking
// traversal; it is backed by the new space's inactive ('from') semispace
// during marking (see MarkLiveObjects()).
static MarkingStack marking_stack;
204
mads.s.ager31e71382008-08-13 09:32:07 +0000205
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000206static inline HeapObject* ShortCircuitConsString(Object** p) {
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000207 // Optimization: If the heap object pointed to by p is a non-symbol
208 // cons string whose right substring is Heap::empty_string, update
209 // it in place to its left substring. Return the updated value.
mads.s.ager31e71382008-08-13 09:32:07 +0000210 //
211 // Here we assume that if we change *p, we replace it with a heap object
212 // (ie, the left substring of a cons string is always a heap object).
213 //
214 // The check performed is:
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000215 // object->IsConsString() && !object->IsSymbol() &&
mads.s.ager31e71382008-08-13 09:32:07 +0000216 // (ConsString::cast(object)->second() == Heap::empty_string())
217 // except the maps for the object and its possible substrings might be
218 // marked.
219 HeapObject* object = HeapObject::cast(*p);
220 MapWord map_word = object->map_word();
221 map_word.ClearMark();
222 InstanceType type = map_word.ToMap()->instance_type();
kasperl@chromium.orgd1e3e722009-04-14 13:38:25 +0000223 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
mads.s.ager31e71382008-08-13 09:32:07 +0000224
ager@chromium.orgbb29dc92009-03-24 13:25:23 +0000225 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
mads.s.ager31e71382008-08-13 09:32:07 +0000226 if (reinterpret_cast<String*>(second) != Heap::empty_string()) return object;
227
228 // Since we don't have the object's start, it is impossible to update the
229 // remembered set. Therefore, we only replace the string with its left
230 // substring when the remembered set does not change.
ager@chromium.orgbb29dc92009-03-24 13:25:23 +0000231 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
mads.s.ager31e71382008-08-13 09:32:07 +0000232 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
233
234 *p = first;
235 return HeapObject::cast(first);
236}
237
238
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000239// Helper class for marking pointers in HeapObjects.
240class MarkingVisitor : public ObjectVisitor {
241 public:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000242 void VisitPointer(Object** p) {
243 MarkObjectByPointer(p);
244 }
245
246 void VisitPointers(Object** start, Object** end) {
247 // Mark all objects pointed to in [start, end).
248 const int kMinRangeForMarkingRecursion = 64;
249 if (end - start >= kMinRangeForMarkingRecursion) {
250 if (VisitUnmarkedObjects(start, end)) return;
251 // We are close to a stack overflow, so just mark the objects.
252 }
253 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
254 }
255
256 void BeginCodeIteration(Code* code) {
257 // When iterating over a code object during marking
258 // ic targets are derived pointers.
259 ASSERT(code->ic_flag() == Code::IC_TARGET_IS_ADDRESS);
260 }
261
262 void EndCodeIteration(Code* code) {
263 // If this is a compacting collection, set ic targets
264 // are pointing to object headers.
265 if (IsCompacting()) code->set_ic_flag(Code::IC_TARGET_IS_OBJECT);
266 }
267
268 void VisitCodeTarget(RelocInfo* rinfo) {
ager@chromium.org236ad962008-09-25 09:45:57 +0000269 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000270 Code* code = CodeFromDerivedPointer(rinfo->target_address());
271 if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
272 IC::Clear(rinfo->pc());
273 // Please note targets for cleared inline cached do not have to be
274 // marked since they are contained in Heap::non_monomorphic_cache().
275 } else {
276 MarkCompactCollector::MarkObject(code);
277 }
278 if (IsCompacting()) {
279 // When compacting we convert the target to a real object pointer.
280 code = CodeFromDerivedPointer(rinfo->target_address());
281 rinfo->set_target_object(code);
282 }
283 }
284
285 void VisitDebugTarget(RelocInfo* rinfo) {
ager@chromium.org236ad962008-09-25 09:45:57 +0000286 ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()) &&
iposva@chromium.org245aa852009-02-10 00:49:54 +0000287 rinfo->IsCallInstruction());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000288 HeapObject* code = CodeFromDerivedPointer(rinfo->call_address());
289 MarkCompactCollector::MarkObject(code);
290 // When compacting we convert the call to a real object pointer.
291 if (IsCompacting()) rinfo->set_call_object(code);
292 }
293
294 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000295 // Mark object pointed to by p.
296 void MarkObjectByPointer(Object** p) {
mads.s.ager31e71382008-08-13 09:32:07 +0000297 if (!(*p)->IsHeapObject()) return;
298 HeapObject* object = ShortCircuitConsString(p);
299 MarkCompactCollector::MarkObject(object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000300 }
301
302 // Tells whether the mark sweep collection will perform compaction.
303 bool IsCompacting() { return MarkCompactCollector::IsCompacting(); }
304
305 // Retrieves the Code pointer from derived code entry.
306 Code* CodeFromDerivedPointer(Address addr) {
307 ASSERT(addr != NULL);
308 return reinterpret_cast<Code*>(
309 HeapObject::FromAddress(addr - Code::kHeaderSize));
310 }
311
312 // Visit an unmarked object.
313 void VisitUnmarkedObject(HeapObject* obj) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000314#ifdef DEBUG
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000315 ASSERT(Heap::Contains(obj));
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000316 ASSERT(!obj->IsMarked());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000317#endif
318 Map* map = obj->map();
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000319 MarkCompactCollector::SetMark(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000320 // Mark the map pointer and the body.
321 MarkCompactCollector::MarkObject(map);
322 obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), this);
323 }
324
325 // Visit all unmarked objects pointed to by [start, end).
326 // Returns false if the operation fails (lack of stack space).
327 inline bool VisitUnmarkedObjects(Object** start, Object** end) {
328 // Return false is we are close to the stack limit.
329 StackLimitCheck check;
330 if (check.HasOverflowed()) return false;
331
332 // Visit the unmarked objects.
333 for (Object** p = start; p < end; p++) {
334 if (!(*p)->IsHeapObject()) continue;
335 HeapObject* obj = HeapObject::cast(*p);
kasper.lund7276f142008-07-30 08:49:36 +0000336 if (obj->IsMarked()) continue;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000337 VisitUnmarkedObject(obj);
338 }
339 return true;
340 }
341};
342
343
mads.s.ager31e71382008-08-13 09:32:07 +0000344// Visitor class for marking heap roots.
345class RootMarkingVisitor : public ObjectVisitor {
346 public:
347 void VisitPointer(Object** p) {
348 MarkObjectByPointer(p);
349 }
350
351 void VisitPointers(Object** start, Object** end) {
352 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
353 }
354
355 MarkingVisitor* stack_visitor() { return &stack_visitor_; }
356
357 private:
358 MarkingVisitor stack_visitor_;
359
360 void MarkObjectByPointer(Object** p) {
361 if (!(*p)->IsHeapObject()) return;
362
363 // Replace flat cons strings in place.
364 HeapObject* object = ShortCircuitConsString(p);
365 if (object->IsMarked()) return;
366
mads.s.ager31e71382008-08-13 09:32:07 +0000367 Map* map = object->map();
368 // Mark the object.
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000369 MarkCompactCollector::SetMark(object);
mads.s.ager31e71382008-08-13 09:32:07 +0000370 // Mark the map pointer and body, and push them on the marking stack.
371 MarkCompactCollector::MarkObject(map);
372 object->IterateBody(map->instance_type(), object->SizeFromMap(map),
373 &stack_visitor_);
374
375 // Mark all the objects reachable from the map and body. May leave
376 // overflowed objects in the heap.
377 MarkCompactCollector::EmptyMarkingStack(&stack_visitor_);
378 }
379};
380
381
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000382// Helper class for pruning the symbol table.
383class SymbolTableCleaner : public ObjectVisitor {
384 public:
385 SymbolTableCleaner() : pointers_removed_(0) { }
386 void VisitPointers(Object** start, Object** end) {
387 // Visit all HeapObject pointers in [start, end).
388 for (Object** p = start; p < end; p++) {
kasper.lund7276f142008-07-30 08:49:36 +0000389 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +0000390 // Check if the symbol being pruned is an external symbol. We need to
391 // delete the associated external data as this symbol is going away.
392
393 // Since the object is not marked we can access its map word safely
394 // without having to worry about marking bits in the object header.
395 Map* map = HeapObject::cast(*p)->map();
396 // Since no objects have yet been moved we can safely access the map of
397 // the object.
398 uint32_t type = map->instance_type();
399 bool is_external = (type & kStringRepresentationMask) ==
400 kExternalStringTag;
401 if (is_external) {
402 bool is_two_byte = (type & kStringEncodingMask) == kTwoByteStringTag;
403 byte* resource_addr = reinterpret_cast<byte*>(*p) +
404 ExternalString::kResourceOffset -
405 kHeapObjectTag;
406 if (is_two_byte) {
ager@chromium.org80787b72009-04-17 10:24:24 +0000407 v8::String::ExternalStringResource** resource =
408 reinterpret_cast<v8::String::ExternalStringResource**>
ager@chromium.org6f10e412009-02-13 10:11:16 +0000409 (resource_addr);
ager@chromium.org80787b72009-04-17 10:24:24 +0000410 delete *resource;
411 // Clear the resource pointer in the symbol.
412 *resource = NULL;
ager@chromium.org6f10e412009-02-13 10:11:16 +0000413 } else {
ager@chromium.org80787b72009-04-17 10:24:24 +0000414 v8::String::ExternalAsciiStringResource** resource =
415 reinterpret_cast<v8::String::ExternalAsciiStringResource**>
ager@chromium.org6f10e412009-02-13 10:11:16 +0000416 (resource_addr);
ager@chromium.org80787b72009-04-17 10:24:24 +0000417 delete *resource;
418 // Clear the resource pointer in the symbol.
419 *resource = NULL;
ager@chromium.org6f10e412009-02-13 10:11:16 +0000420 }
421 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000422 // Set the entry to null_value (as deleted).
423 *p = Heap::null_value();
424 pointers_removed_++;
425 }
426 }
427 }
428
429 int PointersRemoved() {
430 return pointers_removed_;
431 }
432 private:
433 int pointers_removed_;
434};
435
436
mads.s.ager31e71382008-08-13 09:32:07 +0000437void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
mads.s.ager31e71382008-08-13 09:32:07 +0000438 ASSERT(!object->IsMarked());
mads.s.ager31e71382008-08-13 09:32:07 +0000439 ASSERT(Heap::Contains(object));
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000440 if (object->IsMap()) {
441 Map* map = Map::cast(object);
442 if (FLAG_cleanup_caches_in_maps_at_gc) {
443 map->ClearCodeCache();
444 }
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000445 SetMark(map);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000446 if (FLAG_collect_maps &&
447 map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
448 map->instance_type() <= JS_FUNCTION_TYPE) {
449 MarkMapContents(map);
450 } else {
451 marking_stack.Push(map);
452 }
453 } else {
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000454 SetMark(object);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000455 marking_stack.Push(object);
456 }
457}
458
459
460void MarkCompactCollector::MarkMapContents(Map* map) {
461 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
462 *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
463
464 // Mark the Object* fields of the Map.
465 // Since the descriptor array has been marked already, it is fine
466 // that one of these fields contains a pointer to it.
467 MarkingVisitor visitor; // Has no state or contents.
468 visitor.VisitPointers(HeapObject::RawField(map, Map::kPrototypeOffset),
469 HeapObject::RawField(map, Map::kSize));
470}
471
472
473void MarkCompactCollector::MarkDescriptorArray(
474 DescriptorArray *descriptors) {
475 if (descriptors->IsMarked()) return;
476 // Empty descriptor array is marked as a root before any maps are marked.
477 ASSERT(descriptors != Heap::empty_descriptor_array());
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000478 SetMark(descriptors);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000479
480 FixedArray* contents = reinterpret_cast<FixedArray*>(
481 descriptors->get(DescriptorArray::kContentArrayIndex));
482 ASSERT(contents->IsHeapObject());
483 ASSERT(!contents->IsMarked());
484 ASSERT(contents->IsFixedArray());
485 ASSERT(contents->length() >= 2);
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000486 SetMark(contents);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000487 // Contents contains (value, details) pairs. If the details say
488 // that the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
489 // or NULL_DESCRIPTOR, we don't mark the value as live. Only for
490 // type MAP_TRANSITION is the value a Object* (a Map*).
491 for (int i = 0; i < contents->length(); i += 2) {
492 // If the pair (value, details) at index i, i+1 is not
493 // a transition or null descriptor, mark the value.
494 PropertyDetails details(Smi::cast(contents->get(i + 1)));
495 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
496 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
497 if (object->IsHeapObject() && !object->IsMarked()) {
ager@chromium.org3bf7b912008-11-17 09:09:45 +0000498 SetMark(object);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000499 marking_stack.Push(object);
500 }
501 }
502 }
503 // The DescriptorArray descriptors contains a pointer to its contents array,
504 // but the contents array is already marked.
505 marking_stack.Push(descriptors);
506}
507
508
509void MarkCompactCollector::CreateBackPointers() {
510 HeapObjectIterator iterator(Heap::map_space());
511 while (iterator.has_next()) {
512 Object* next_object = iterator.next();
513 if (next_object->IsMap()) { // Could also be ByteArray on free list.
514 Map* map = Map::cast(next_object);
515 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
516 map->instance_type() <= JS_FUNCTION_TYPE) {
517 map->CreateBackPointers();
518 } else {
519 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
520 }
521 }
522 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000523}
524
525
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000526static int OverflowObjectSize(HeapObject* obj) {
527 // Recover the normal map pointer, it might be marked as live and
528 // overflowed.
kasper.lund7276f142008-07-30 08:49:36 +0000529 MapWord map_word = obj->map_word();
530 map_word.ClearMark();
531 map_word.ClearOverflow();
532 return obj->SizeFromMap(map_word.ToMap());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000533}
534
535
mads.s.ager31e71382008-08-13 09:32:07 +0000536// Fill the marking stack with overflowed objects returned by the given
537// iterator. Stop when the marking stack is filled or the end of the space
538// is reached, whichever comes first.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000539template<class T>
540static void ScanOverflowedObjects(T* it) {
mads.s.ager31e71382008-08-13 09:32:07 +0000541 // The caller should ensure that the marking stack is initially not full,
542 // so that we don't waste effort pointlessly scanning for objects.
543 ASSERT(!marking_stack.is_full());
544
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000545 while (it->has_next()) {
mads.s.ager31e71382008-08-13 09:32:07 +0000546 HeapObject* object = it->next();
547 if (object->IsOverflowed()) {
548 object->ClearOverflow();
549 ASSERT(object->IsMarked());
550 ASSERT(Heap::Contains(object));
551 marking_stack.Push(object);
552 if (marking_stack.is_full()) return;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000553 }
554 }
555}
556
557
558bool MarkCompactCollector::MustBeMarked(Object** p) {
559 // Check whether *p is a HeapObject pointer.
560 if (!(*p)->IsHeapObject()) return false;
kasper.lund7276f142008-07-30 08:49:36 +0000561 return !HeapObject::cast(*p)->IsMarked();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000562}
563
564
ager@chromium.org5ec48922009-05-05 07:25:34 +0000565class SymbolMarkingVisitor : public ObjectVisitor {
566 public:
567 void VisitPointers(Object** start, Object** end) {
568 MarkingVisitor marker;
569 for (Object** p = start; p < end; p++) {
570 if (!(*p)->IsHeapObject()) continue;
571
572 HeapObject* object = HeapObject::cast(*p);
573 // If the object is marked, we have marked or are in the process
574 // of marking subparts.
575 if (object->IsMarked()) continue;
576
577 // The object is unmarked, we do not need to unmark to use its
578 // map.
579 Map* map = object->map();
580 object->IterateBody(map->instance_type(),
581 object->SizeFromMap(map),
582 &marker);
583 }
584 }
585};
586
587
588void MarkCompactCollector::MarkSymbolTable() {
589 // Objects reachable from symbols are marked as live so as to ensure
590 // that if the symbol itself remains alive after GC for any reason,
591 // and if it is a sliced string or a cons string backed by an
592 // external string (even indirectly), then the external string does
593 // not receive a weak reference callback.
594 SymbolTable* symbol_table = SymbolTable::cast(Heap::symbol_table());
595 // Mark the symbol table itself.
596 SetMark(symbol_table);
597 // Explicitly mark the prefix.
598 MarkingVisitor marker;
599 symbol_table->IteratePrefix(&marker);
600 ProcessMarkingStack(&marker);
601 // Mark subparts of the symbols but not the symbols themselves
602 // (unless reachable from another symbol).
603 SymbolMarkingVisitor symbol_marker;
604 symbol_table->IterateElements(&symbol_marker);
605 ProcessMarkingStack(&marker);
606}
607
608
// Mark the strong roots and everything reachable from them, leaving no
// overflowed objects behind in the heap.
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now: refill
  // the marking stack from the heap and drain it until no overflow is left.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(visitor->stack_visitor());
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000623
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000624
kasper.lund7276f142008-07-30 08:49:36 +0000625void MarkCompactCollector::MarkObjectGroups() {
ager@chromium.org8bb60582008-12-11 12:02:20 +0000626 List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000627
ager@chromium.org8bb60582008-12-11 12:02:20 +0000628 for (int i = 0; i < object_groups->length(); i++) {
629 ObjectGroup* entry = object_groups->at(i);
kasper.lund7276f142008-07-30 08:49:36 +0000630 if (entry == NULL) continue;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000631
kasper.lund7276f142008-07-30 08:49:36 +0000632 List<Object**>& objects = entry->objects_;
633 bool group_marked = false;
634 for (int j = 0; j < objects.length(); j++) {
635 Object* object = *objects[j];
636 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
637 group_marked = true;
638 break;
639 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000640 }
641
kasper.lund7276f142008-07-30 08:49:36 +0000642 if (!group_marked) continue;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000643
kasper.lund7276f142008-07-30 08:49:36 +0000644 // An object in the group is marked, so mark as gray all white heap
645 // objects in the group.
646 for (int j = 0; j < objects.length(); ++j) {
647 if ((*objects[j])->IsHeapObject()) {
648 MarkObject(HeapObject::cast(*objects[j]));
649 }
650 }
651 // Once the entire group has been colored gray, set the object group
652 // to NULL so it won't be processed again.
ager@chromium.org8bb60582008-12-11 12:02:20 +0000653 delete object_groups->at(i);
654 object_groups->at(i) = NULL;
kasper.lund7276f142008-07-30 08:49:36 +0000655 }
656}
657
658
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack(MarkingVisitor* visitor) {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.  Visiting the body may
    // push more objects, which are handled by subsequent loop iterations.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);
    object->IterateBody(map->instance_type(), object->SizeFromMap(map),
                        visitor);
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000681
kasper.lund7276f142008-07-30 08:49:36 +0000682
// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the markings stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  // Scan each space in turn; return as soon as the stack fills up again so
  // that no overflow bit is cleared without the object being pushed.
  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  // Every space was scanned without filling the stack: no overflowed
  // objects remain in the heap.
  marking_stack.clear_overflowed();
}
718
719
720// Mark all objects reachable (transitively) from objects on the marking
721// stack. Before: the marking stack contains zero or more heap object
722// pointers. After: the marking stack is empty and there are no overflowed
723// objects in the heap.
724void MarkCompactCollector::ProcessMarkingStack(MarkingVisitor* visitor) {
725 EmptyMarkingStack(visitor);
726 while (marking_stack.overflowed()) {
727 RefillMarkingStack();
728 EmptyMarkingStack(visitor);
729 }
730}
731
732
733void MarkCompactCollector::ProcessObjectGroups(MarkingVisitor* visitor) {
kasper.lund7276f142008-07-30 08:49:36 +0000734 bool work_to_do = true;
735 ASSERT(marking_stack.is_empty());
736 while (work_to_do) {
737 MarkObjectGroups();
738 work_to_do = !marking_stack.is_empty();
mads.s.ager31e71382008-08-13 09:32:07 +0000739 ProcessMarkingStack(visitor);
kasper.lund7276f142008-07-30 08:49:36 +0000740 }
741}
742
743
// Marking phase driver: mark everything transitively reachable from the
// strong roots, object groups, and weak global handles, then prune dead
// symbols and drop the object groups.
void MarkCompactCollector::MarkLiveObjects() {
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked black, unreachable
  // objects are white. Mark objects reachable from object groups with at
  // least one marked object, and continue until no new objects are
  // reachable from the object groups.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // The objects reachable from the roots or object groups are marked black,
  // unreachable objects are white. Process objects reachable only from
  // weak global handles.
  //
  // First we mark weak pointers not yet reachable.
  GlobalHandles::MarkWeakRoots(&MustBeMarked);
  // Then we process weak pointers and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  // Drain any overflow caused by the weak-root processing above.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack(root_visitor.stack_visitor());
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups(root_visitor.stack_visitor());

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use SymbolTable::cast here because the symbol
  // table is marked.
  SymbolTable* symbol_table =
      reinterpret_cast<SymbolTable*>(Heap::symbol_table());
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}
794
795
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000796static int CountMarkedCallback(HeapObject* obj) {
797 MapWord map_word = obj->map_word();
798 map_word.ClearMark();
799 return obj->SizeFromMap(map_word.ToMap());
800}
801
802
#ifdef DEBUG
// Adjust the debug-only live-object statistics for obj, which must lie in
// exactly one heap space.  scale must be +1 (count the object as live) or
// -1 (remove it from the counts), as asserted below.
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj, int scale) {
  ASSERT(scale == -1 || scale == 1);
  live_bytes_ += obj->Size() * scale;
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_ += scale;
  } else if (Heap::map_space()->Contains(obj)) {
    // Only maps live in the map space.
    ASSERT(obj->IsMap());
    live_map_objects_ += scale;
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_ += scale;
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_ += scale;
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_ += scale;
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_ += scale;  // Normalized from "+=scale" for consistency.
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG
825
826
// Sweep the large object space right after marking: free unmarked large
// objects and clear the marks on the survivors, then advance the collector
// state to the next phase (encoding when compacting, sweeping otherwise).
void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}
836
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +0000837// Safe to use during marking phase only.
838bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
839 MapWord metamap = object->map_word();
840 metamap.ClearMark();
841 return metamap.ToMap()->instance_type() == MAP_TYPE;
842}
843
void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  while (map_iterator.has_next()) {
    Map* map = reinterpret_cast<Map*>(map_iterator.next());
    // NOTE(review): a dead object in map space that is already a byte array
    // is skipped — presumably it was overwritten earlier; confirm against
    // the map-space sweeping code.
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.  The chain
    // ends at the first non-map object, which is the real prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object *next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      // Restore the prototype slot (it held the back pointer) via a raw
      // field write; no write barrier is used here.
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000894
895// -------------------------------------------------------------------------
896// Phase 2: Encode forwarding addresses.
897// When compacting, forwarding addresses for objects in old space and map
898// space are encoded in their map pointer word (along with an encoding of
899// their map pointers).
900//
901// 31 21 20 10 9 0
902// +-----------------+------------------+-----------------+
903// |forwarding offset|page offset of map|page index of map|
904// +-----------------+------------------+-----------------+
905// 11 bits 11 bits 10 bits
906//
907// An address range [start, end) can have both live and non-live objects.
908// Maximal non-live regions are marked so they can be skipped on subsequent
909// sweeps of the heap. A distinguished map-pointer encoding is used to mark
910// free regions of one-word size (in which case the next word is the start
911// of a live object). A second distinguished map-pointer encoding is used
912// to mark free regions larger than one word, and the size of the free
913// region (including the first word) is written to the second word of the
914// region.
915//
916// Any valid map page offset must lie in the object area of the page, so map
917// page offsets less than Page::kObjectStartOffset are invalid. We use a
918// pair of distinguished invalid map encodings (for single word and multiple
919// words) to indicate free regions in the page found during computation of
920// forwarding addresses and skipped over in subsequent sweeps.
// Sentinel "map encodings" for free regions found during forwarding-address
// computation: kSingleFreeEncoding marks a one-word hole, kMultiFreeEncoding
// marks a larger hole whose total size is stored in the following word.
// Both are invalid as real map encodings (see the comment above).
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
923
924
925// Encode a free region, defined by the given start address and size, in the
926// first word or two of the region.
927void EncodeFreeRegion(Address free_start, int free_size) {
928 ASSERT(free_size >= kIntSize);
929 if (free_size == kIntSize) {
930 Memory::uint32_at(free_start) = kSingleFreeEncoding;
931 } else {
932 ASSERT(free_size >= 2 * kIntSize);
933 Memory::uint32_at(free_start) = kMultiFreeEncoding;
934 Memory::int_at(free_start + kIntSize) = free_size;
935 }
936
937#ifdef DEBUG
938 // Zap the body of the free region.
939 if (FLAG_enable_slow_asserts) {
940 for (int offset = 2 * kIntSize;
941 offset < free_size;
942 offset += kPointerSize) {
943 Memory::Address_at(free_start + offset) = kZapValue;
944 }
945 }
946#endif
947}
948
949
// Try to promote all objects in new space.  Each object is promoted to the
// old space chosen by Heap::TargetSpace — the old pointer space or the old
// data space, as the ASSERT below confirms.  If promotion fails, the object
// is instead relocated within new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  OldSpace* target_space = Heap::TargetSpace(object);
  ASSERT(target_space == Heap::old_pointer_space() ||
         target_space == Heap::old_data_space());
  Object* forwarded = target_space->MCAllocateRaw(object_size);

  if (forwarded->IsFailure()) {
    // Promotion failed: fall back to allocating in new space.
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}
963
964
// Allocation functions for the paged spaces call the space's MCAllocateRaw.
// The object argument is unused; only its size matters.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* object,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}
970
971
// Relocation allocator for the old data space; the object itself is unused.
inline Object* MCAllocateFromOldDataSpace(HeapObject* object, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}
975
976
// Relocation allocator for the code space; the object itself is unused.
inline Object* MCAllocateFromCodeSpace(HeapObject* object, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}
980
981
// Relocation allocator for the map space; the object itself is unused.
inline Object* MCAllocateFromMapSpace(HeapObject* object, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}
985
986
987// The forwarding address is encoded at the same offset as the current
988// to-space object, but in from space.
989inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
990 int object_size,
991 Object* new_object,
992 int* ignored) {
993 int offset =
994 Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
995 Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
996 HeapObject::cast(new_object)->address();
997}
998
999
// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  // Pack the map's address together with the live-byte offset into the
  // object's map word; the original map is recoverable from the encoding.
  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  // Advance the running live-byte offset for the next object on this page.
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}
1019
1020
// Most non-live objects are ignored.
// Used as the ProcessNonLiveFunction template argument for spaces that
// need no per-object action when an object dies.
inline void IgnoreNonLiveObject(HeapObject* object) {}
1023
1024
1025// A code deletion event is logged for non-live code objects.
1026inline void LogNonLiveCodeObject(HeapObject* object) {
1027 if (object->IsCode()) LOG(CodeDeleteEvent(object->address()));
1028}
1029
1030
1031// Function template that, given a range of addresses (eg, a semispace or a
1032// paged space page), iterates through the objects in the range to clear
1033// mark bits and compute and encode forwarding addresses. As a side effect,
1034// maximal free chunks are marked so that they can be skipped on subsequent
1035// sweeps.
1036//
1037// The template parameters are an allocation function, a forwarding address
1038// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      // Clear the mark first so the map word is a plain map pointer again
      // and object->Size() below reads a valid map.
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, current - free_start);
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) EncodeFreeRegion(free_start, end - free_start);
}
1093
1094
// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  // New-space forwarding addresses are stored in from space (see
  // EncodeForwardingAddressInNewSpace), so the per-page offset parameter is
  // unused; 'ignored' is only a placeholder.
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}
1105
1106
// Encode forwarding addresses page by page for a paged space, using the
// given allocator and non-live-object handler.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    // The offset of each live object in the page from the first live object
    // in the page.  Reset for every page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}
1125
1126
// Non-compacting sweep of new space: clear marks on live objects and
// overwrite dead objects with filler maps so the space stays iterable.
static void SweepSpace(NewSpace* space) {
  HeapObject* object;
  for (Address current = space->bottom();
       current < space->top();
       current += object->Size()) {
    object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
    } else {
      // We give non-live objects a map that will correctly give their size,
      // since their existing map might not be live after the collection.
      int size = object->Size();
      if (size >= Array::kHeaderSize) {
        object->set_map(Heap::byte_array_map());
        ByteArray::cast(object)->set_length(ByteArray::LengthFor(size));
      } else {
        ASSERT(size == kPointerSize);
        object->set_map(Heap::one_word_filler_map());
      }
      ASSERT(object->Size() == size);
    }
    // The object is now unmarked for the call to Size() at the top of the
    // loop.
  }
}
1153
1154
// Non-compacting sweep of a paged space: clear marks on live objects and
// hand maximal runs of dead objects to the space-specific dealloc function.
static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // Tracks live/free run boundaries within the page, like the
    // is_prev_alive/free_start pair in EncodeForwardingAddressesInRange.
    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();
        if (MarkCompactCollector::IsCompacting() && object->IsCode()) {
          // If this is compacting collection marked code objects have had
          // their IC targets converted to objects.
          // They need to be converted back to addresses.
          Code::cast(object)->ConvertICTargetsFromObjectToAddress();
        }
        if (!is_previous_alive) {  // Transition from free to live.
          dealloc(free_start, current - free_start);
          is_previous_alive = true;
        }
      } else {
        if (object->IsCode()) {
          // Notify the logger that compiled code has been collected.
          LOG(CodeDeleteEvent(Code::cast(object)->address()));
        }
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    // If the last region was not live we need to deallocate from free_start
    // to the allocation top in the page.
    if (!is_previous_alive) {
      int free_size = p->AllocationTop() - free_start;
      if (free_size > 0) {
        dealloc(free_start, free_size);
      }
    }
  }
}
1205
1206
// Free a dead block in the old pointer space, clearing the remembered-set
// bits that cover it first.
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes) {
  Heap::ClearRSetRange(start, size_in_bytes);
  Heap::old_pointer_space()->Free(start, size_in_bytes);
}
1212
1213
// Free a dead block in the old data space.  Unlike the old pointer space,
// no remembered-set range is cleared here — presumably because this space
// holds no pointers the remembered set would track.
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes) {
  Heap::old_data_space()->Free(start, size_in_bytes);
}
1218
1219
// Free a dead block in the code space.
void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes) {
  Heap::code_space()->Free(start, size_in_bytes);
}
1224
1225
void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes) {
  // Objects in map space are frequently assumed to have size Map::kSize and a
  // valid map in their first word. Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Heap::ClearRSetRange(start, size_in_bytes);
  Address end = start + size_in_bytes;
  // Free each map-sized chunk individually so the space's free list keeps
  // its fixed-size-cell invariant.
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a);
  }
}
1238
1239
// Phase 2 driver: compute and encode forwarding addresses space by space.
// The order matters — see the comments below; in particular the map space
// must come last because encoding overwrites non-live objects whose map
// pointers other spaces still need for size computation.
void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        LogNonLiveCodeObject>(
      Heap::code_space());

  // Compute new space next to last after the old and code spaces have been
  // compacted. Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
}
1279
1280
void MarkCompactCollector::SweepSpaces() {
  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::new_space());
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);
}
1295
1296
// Iterate the live objects in a range of addresses (eg, a page or a
// semispace).  Non-live regions in [start, end) have been stamped with the
// free-region encodings (kSingleFreeEncoding / kMultiFreeEncoding) and are
// skipped; every other address starts a live object whose size is obtained
// from size_func.  Returns the number of live objects visited.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      // One-word hole.
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      // Multi-word hole: its size is stored in the following word.
      current += Memory::int_at(current + kIntSize);
    } else {
      live_objects++;
      current += size_func(HeapObject::FromAddress(current));
    }
  }
  return live_objects;
}
1321
1322
// Iterate the live objects in new space, returning how many were visited.
int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}
1328
1329
1330int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
1331 HeapObjectCallback size_f) {
1332 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
1333 int total = 0;
1334 PageIterator it(space, PageIterator::PAGES_IN_USE);
1335 while (it.has_next()) {
1336 Page* p = it.next();
1337 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
1338 p->AllocationTop(),
1339 size_f);
1340 }
1341 return total;
1342}
1343
1344
ager@chromium.orgddb913d2009-01-27 10:01:48 +00001345// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001346// Phase 3: Update pointers
1347
// Helper class for updating pointers in HeapObjects: rewrites each visited
// slot to the forwarding address computed during phase 2.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

 private:
  void UpdatePointer(Object** p) {
    // Smis and other non-heap values are left untouched.
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      // New-space forwarding addresses are stored in from space at the
      // slot mirroring the object's to-space offset.
      Address f_addr = Heap::new_space()->FromSpaceLow() +
                       Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(f_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::code_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
      // Paged spaces: decode the forwarding address from the object's
      // encoded map word.
      ASSERT(Heap::old_pointer_space()->Contains(obj) ||
             Heap::old_data_space()->Contains(obj) ||
             Heap::code_space()->Contains(obj) ||
             Heap::map_space()->Contains(obj));

      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::code_space()->Contains(new_addr) ||
             Heap::map_space()->Contains(new_addr));

#ifdef DEBUG
      // Compaction only slides objects downwards within their space.
      if (Heap::old_pointer_space()->Contains(obj)) {
        ASSERT(Heap::old_pointer_space()->MCSpaceOffsetForAddress(new_addr) <=
               Heap::old_pointer_space()->MCSpaceOffsetForAddress(old_addr));
      } else if (Heap::old_data_space()->Contains(obj)) {
        ASSERT(Heap::old_data_space()->MCSpaceOffsetForAddress(new_addr) <=
               Heap::old_data_space()->MCSpaceOffsetForAddress(old_addr));
      } else if (Heap::code_space()->Contains(obj)) {
        ASSERT(Heap::code_space()->MCSpaceOffsetForAddress(new_addr) <=
               Heap::code_space()->MCSpaceOffsetForAddress(old_addr));
      } else {
        ASSERT(Heap::map_space()->MCSpaceOffsetForAddress(new_addr) <=
               Heap::map_space()->MCSpaceOffsetForAddress(old_addr));
      }
#endif
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};
1429
mads.s.agercbaa0602008-08-14 13:41:48 +00001430
// Phase 3 driver: rewrite every pointer in the roots, weak handles, and
// all live objects to its forwarding address.
void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps = IterateLiveObjects(Heap::map_space(),
                                     &UpdatePointersInOldObject);
  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
                                             &UpdatePointersInOldObject);
  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
                                          &UpdatePointersInOldObject);
  int live_codes = IterateLiveObjects(Heap::code_space(),
                                      &UpdatePointersInOldObject);
  int live_news = IterateLiveObjects(Heap::new_space(),
                                     &UpdatePointersInNewObject);

  // Large objects do not move, the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  while (it.has_next()) UpdatePointersInNewObject(it.next());

  // The counts are only consumed by the debug asserts below; USE() keeps
  // release builds free of unused-variable warnings.
  USE(live_maps);
  USE(live_pointer_olds);
  USE(live_data_olds);
  USE(live_codes);
  USE(live_news);

#ifdef DEBUG
  // Cross-check against the statistics gathered during marking.
  ASSERT(live_maps == live_map_objects_);
  ASSERT(live_data_olds == live_old_data_objects_);
  ASSERT(live_pointer_olds == live_old_pointer_objects_);
  ASSERT(live_codes == live_code_objects_);
  ASSERT(live_news == live_young_objects_);
#endif
}
1469
1470
// Update pointers in the body of a new-space or large object.  Such an
// object's map word still holds a plain map pointer (not the encoded
// form used in the paged old spaces), so the map is read directly and
// replaced with the map's forwarding address.  Returns the object's
// size in bytes.
int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer; it is needed below to compute the object
  // size and instance type, since maps are not relocated yet.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}
1498
1499
// Update pointers in the body of a live old-space object, whose map
// word was overwritten with an encoded <map address, offset> pair by
// the forwarding-address phase.  Returns the object's size in bytes.
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer: re-encode the map word with the map's
  // forwarding address, preserving the previously encoded offset.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}
1529
1530
// Compute the forwarding (post-compaction) address of obj from the
// offset encoded in its map word.  The encoding stores only an offset;
// the actual address is the forwarding address of the page's first
// live object plus that offset, overflowing onto the next page when
// the sum does not fit below the destination page's relocation top.
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in an old space or the map space.
  MapWord encoding = obj->map_word();

  // Offset relative to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  Address mc_top = forwarded_page->mc_relocation_top;
  int mc_top_offset = forwarded_page->Offset(mc_top);

  // Check if the current object's forwarding pointer is in the same
  // page as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page, NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  // Rebase the offset past the part that fit on forwarded_page, and
  // skip the next page's header.
  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);

  return next_page->OffsetToAddress(offset);
}
1570
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001571
ager@chromium.orgddb913d2009-01-27 10:01:48 +00001572// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001573// Phase 4: Relocate objects
1574
// Phase 4: physically move every live object to its forwarding address
// and commit the relocation information in each space.  Entered in
// state UPDATE_POINTERS; leaves the collector in state
// RELOCATE_OBJECTS.
void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocates objects, always relocate map objects first. Relocating
  // objects in other space relies on map objects to get object size.
  int live_maps = IterateLiveObjects(Heap::map_space(), &RelocateMapObject);
  int live_pointer_olds = IterateLiveObjects(Heap::old_pointer_space(),
                                             &RelocateOldPointerObject);
  int live_data_olds = IterateLiveObjects(Heap::old_data_space(),
                                          &RelocateOldDataObject);
  int live_codes = IterateLiveObjects(Heap::code_space(), &RelocateCodeObject);
  int live_news = IterateLiveObjects(Heap::new_space(), &RelocateNewObject);

  // The counts are only consumed by the DEBUG consistency checks below.
  USE(live_maps);
  USE(live_data_olds);
  USE(live_pointer_olds);
  USE(live_codes);
  USE(live_news);
#ifdef DEBUG
  ASSERT(live_maps == live_map_objects_);
  ASSERT(live_data_olds == live_old_data_objects_);
  ASSERT(live_pointer_olds == live_old_pointer_objects_);
  ASSERT(live_codes == live_code_objects_);
  ASSERT(live_news == live_young_objects_);
#endif

  // Notify code objects in the large-object space to convert IC targets
  // to addresses.  This must happen after lo_space_->Compact.
  LargeObjectIterator it(Heap::lo_space());
  while (it.has_next()) { ConvertCodeICTargetToAddress(it.next()); }

  // Flip the from and to semispaces of new space.
  Heap::new_space()->Flip();

  // Set the age mark to the bottom of to space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  Heap::new_space()->MCCommitRelocationInfo();
#ifdef DEBUG
  // It is safe to write to the remembered sets as remembered sets on a
  // page-by-page basis after committing the m-c forwarding pointer.
  Page::set_rset_state(Page::IN_USE);
#endif
  // Commit relocation info in all paged spaces.
  PagedSpaces spaces;
  while (PagedSpace* space = spaces.next()) space->MCCommitRelocationInfo();
}
1624
1625
1626int MarkCompactCollector::ConvertCodeICTargetToAddress(HeapObject* obj) {
1627 if (obj->IsCode()) {
1628 Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
1629 }
1630 return obj->Size();
1631}
1632
1633
// Relocate a live map object to its forwarding address.  Maps are
// relocated before all other objects (see RelocateObjects) and have a
// fixed size, so Map::kSize bytes are copied and returned.
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Decode the map pointer (forwarded address).
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Recover the plain map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  // The meta map object may not be copied yet.
  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // memmove: source and destination ranges may overlap.
    memmove(new_addr, old_addr, Map::kSize);  // copy contents
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}
1661
1662
// Common prologue for relocating a non-map object: restores the real
// map pointer, computes the object's size, and notifies the target
// space of the relocation (MCAdjustRelocationEnd).  Returns the object
// size in bytes; the caller performs the actual copy.
static inline int RelocateOldObject(HeapObject* obj,
                                    OldSpace* space,
                                    Address new_addr,
                                    Address map_addr) {
  // Recover the plain map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  // This is a non-map object, it relies on the assumption that the Map space
  // is compacted before the Old space (see RelocateObjects).
  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  // Compaction only moves objects towards smaller space offsets.
  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

  space->MCAdjustRelocationEnd(new_addr, obj_size);

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}
1688
1689
// Relocate a live non-code object in the given old space (old pointer
// or old data space) to its forwarding address.  Code objects are
// handled by RelocateCodeObject instead.  Returns the object size in
// bytes.
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   OldSpace* space) {
  // Decode the map pointer (forwarded address).
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  int obj_size = RelocateOldObject(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // memmove: source and destination ranges may overlap.
    memmove(new_addr, old_addr, obj_size);  // copy contents
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  return obj_size;
}
1712
1713
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001714int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
1715 return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
1716}
1717
1718
1719int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
1720 return RelocateOldNonCodeObject(obj, Heap::old_data_space());
1721}
1722
1723
// Relocate a live code-space object.  Inline-cache targets inside a
// Code object are converted to raw addresses (computed relative to the
// old location) before the move, and the copied code is then relocated
// by the displacement.  Returns the object size in bytes.
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Decode the map pointer (forwarded address).
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  int obj_size = RelocateOldObject(obj, Heap::code_space(), new_addr, map_addr);

  // Convert inline cache targets to addresses while the object still
  // sits at its old address.
  if (obj->IsCode()) {
    Code::cast(obj)->ConvertICTargetsFromObjectToAddress();
  }

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // memmove: source and destination ranges may overlap.
    memmove(new_addr, old_addr, obj_size);  // copy contents
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // Relocate the copied code by the displacement; this may also
    // update inline cache targets.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    LOG(CodeMoveEvent(old_addr, new_addr));
  }

  return obj_size;
}
1757
1758
// Relocate a live new-space object.  Its forwarding address is read
// from from-space at the same offset the object occupies in to-space;
// the target is either still inside new space or a promotion into one
// of the old spaces.  Returns the object size in bytes.
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Look up the forwarding address stored in from-space.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    // Staying in new space: objects can only move towards smaller
    // offsets.
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    // Promoted: the destination must be one of the two old spaces.
    OldSpace* target_space = Heap::TargetSpace(obj);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    target_space->MCAdjustRelocationEnd(new_addr, obj_size);
  }

  // New and old addresses cannot overlap, so memcpy is safe here.
  memcpy(reinterpret_cast<void*>(new_addr),
         reinterpret_cast<void*>(old_addr),
         obj_size);

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return obj_size;
}
1792
1793
ager@chromium.orgddb913d2009-01-27 10:01:48 +00001794// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001795// Phase 5: rebuild remembered sets
1796
// Phase 5: rebuild the remembered sets; the actual work is delegated
// to Heap::RebuildRSets.  Entered in state RELOCATE_OBJECTS; leaves
// the collector in state REBUILD_RSETS.
void MarkCompactCollector::RebuildRSets() {
#ifdef DEBUG
  ASSERT(state_ == RELOCATE_OBJECTS);
  state_ = REBUILD_RSETS;
#endif
  Heap::RebuildRSets();
}
1804
1805} } // namespace v8::internal