// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();
  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (ie, we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
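  // Worked example (illustrative numbers only): with 2 MB recoverable out of
  // 10 MB used, old_gen_fragmentation is 20, which exceeds kFragmentationLimit,
  // and 2 MB exceeds kFragmentationAllowed, so compaction is requested for the
  // next GC.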
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
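//
// In code, that protocol is the loop in ProcessMarkingStack() below:
// EmptyMarkingStack() drains the stack and, while the overflow flag is set,
// RefillMarkingStack() re-populates it from overflowed objects before it is
// drained again.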

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void EnableCodeFlushing(bool enabled) {
    if (enabled) {
      table_.Register(kVisitJSFunction, &VisitJSFunction);
    } else {
      table_.Register(kVisitJSFunction,
                      &JSObjectVisitor::VisitSpecialized<JSFunction::kSize>);
    }
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);


    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitSharedFunctionInfo,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SharedFunctionInfo::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitJSFunction, &VisitJSFunction);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Object** p)) {
    MarkObjectByPointer(p);
  }

  INLINE(static void VisitPointers(Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  static inline void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  static inline void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Visit an unmarked object.
  static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;
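  // In FlushCodeForFunction() below, code_age is reset to zero whenever the
  // shared function info's code is found marked and is otherwise incremented,
  // so (roughly) code is flushed only after it has gone unused for
  // kCodeAgeThreshold consecutive mark-compact collections.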

  inline static bool HasSourceCode(SharedFunctionInfo* info) {
    Object* undefined = Heap::raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  static void FlushCodeForFunction(JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    if (shared_info->IsMarked()) return;

    // Special handling if the function and shared info objects
    // have different code objects.
    if (function->unchecked_code() != shared_info->unchecked_code()) {
      // If the shared function has been flushed but the function has not,
      // we flush the function if possible.
      if (!IsCompiled(shared_info) &&
          IsCompiled(function) &&
          !function->unchecked_code()->IsMarked()) {
        function->set_code(shared_info->unchecked_code());
      }
      return;
    }

    // Code is either on stack or in compilation cache.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return;

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return;
    }

    // Compute the lazy compilable version of the code.
    Code* code = Builtins::builtin(Builtins::LazyCompile);
    shared_info->set_code(code);
    function->set_code(code);
  }


  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }


  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }


  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    if (!(map == Heap::raw_unchecked_context_map() ||
          map == Heap::raw_unchecked_catch_context_map() ||
          map == Heap::raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }


  static void VisitJSFunction(Map* map, HeapObject* object) {
    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);

    // The function must have a valid context and not be a builtin.
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      FlushCodeForFunction(jsfunction);
    }

    JSObjectVisitor::VisitSpecialized<JSFunction::kSize>(map, object);
  }

  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(start, end);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitCodeTarget(rinfo);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitDebugTarget(rinfo);
  }
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  void VisitThread(ThreadLocalTop* top) {
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
    }
  }
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsHeapObject()) {
      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
    }
  }
};


void MarkCompactCollector::PrepareForCodeFlushing() {
  if (!FLAG_flush_code) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (Debug::IsLoaded() || Debug::has_break_points()) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }
#endif
  StaticMarkingVisitor::EnableCodeFlushing(true);

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor;
  ThreadManager::IterateArchivedThreads(&code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor;
  CompilationCache::IterateFunctions(&visitor);

  MarkCompactCollector::ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack();
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say that
  // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
  // NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
  // Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack();
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);

    StaticMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
  EmptyMarkingStack();
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::ProcessObjectGroups() {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack();
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  PrepareForCodeFlushing();

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups();

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups();

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;
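// For example, EncodeFreeRegion() below writes kSingleFreeEncoding into a free
// region of exactly kIntSize bytes; a larger free region gets
// kMultiFreeEncoding in its first word and its total size in its second word.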


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}


// Try to promote all objects in new space. Objects that cannot contain
// pointers are promoted to the old data space, all others to the old pointer
// space. Objects too large for a paged space fall back to an allocation in
// new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
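// For example (illustrative numbers), a live object preceded by 64 bytes of
// live data on its page has the pair (its map address, offset 64) packed into
// its map word, while the page's mc_first_forwarded field records where the
// page's first live object will move.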
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all alive objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If the object is dead we
// write NULL as its forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of dirty regions; we
// should clear them to avoid encountering them during the next dirty regions
// iteration.
1386static void MigrateObject(Address dst,
1387 Address src,
1388 int size,
1389 bool to_old_space) {
1390 if (to_old_space) {
1391 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1392 } else {
1393 Heap::CopyBlock(dst, src, size);
1394 }
Steve Block6ded16b2010-05-10 14:33:55 +01001395
1396 Memory::Address_at(src) = dst;
1397}
1398
1399
Iain Merrick75681382010-08-19 15:07:18 +01001400class StaticPointersToNewGenUpdatingVisitor : public
1401 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1402 public:
1403 static inline void VisitPointer(Object** p) {
1404 if (!(*p)->IsHeapObject()) return;
1405
1406 HeapObject* obj = HeapObject::cast(*p);
1407 Address old_addr = obj->address();
1408
1409 if (Heap::new_space()->Contains(obj)) {
1410 ASSERT(Heap::InFromSpace(*p));
1411 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1412 }
1413 }
1414};
1415
1416
Steve Block6ded16b2010-05-10 14:33:55 +01001417// Visitor for updating pointers from live objects in old spaces to new space.
1418// It does not expect to encounter pointers to dead objects.
1419class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1420 public:
1421 void VisitPointer(Object** p) {
Iain Merrick75681382010-08-19 15:07:18 +01001422 StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
Steve Block6ded16b2010-05-10 14:33:55 +01001423 }
1424
1425 void VisitPointers(Object** start, Object** end) {
Iain Merrick75681382010-08-19 15:07:18 +01001426 for (Object** p = start; p < end; p++) {
1427 StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
1428 }
Steve Block6ded16b2010-05-10 14:33:55 +01001429 }
1430
1431 void VisitCodeTarget(RelocInfo* rinfo) {
1432 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1433 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1434 VisitPointer(&target);
1435 rinfo->set_target_address(Code::cast(target)->instruction_start());
1436 }
1437
1438 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001439 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1440 rinfo->IsPatchedReturnSequence()) ||
1441 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1442 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block6ded16b2010-05-10 14:33:55 +01001443 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1444 VisitPointer(&target);
1445 rinfo->set_call_address(Code::cast(target)->instruction_start());
1446 }
Steve Block6ded16b2010-05-10 14:33:55 +01001447};
1448
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001449
// Callback for updating pointers from live objects in old spaces to new space
// during dirty region iteration. It can encounter pointers to dead objects in
// new space when traversing map space (see the comment above MigrateObject).
1453static void UpdatePointerToNewGen(HeapObject** p) {
1454 if (!(*p)->IsHeapObject()) return;
1455
1456 Address old_addr = (*p)->address();
1457 ASSERT(Heap::InFromSpace(*p));
1458
1459 Address new_addr = Memory::Address_at(old_addr);
1460
  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next iteration of dirty regions.
    *p = NULL;
1465 } else {
1466 *p = HeapObject::FromAddress(new_addr);
1467 }
Steve Block6ded16b2010-05-10 14:33:55 +01001468}
1469
1470
1471static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
1472 Address old_addr = HeapObject::cast(*p)->address();
1473 Address new_addr = Memory::Address_at(old_addr);
1474 return String::cast(HeapObject::FromAddress(new_addr));
1475}
1476
1477
1478static bool TryPromoteObject(HeapObject* object, int object_size) {
1479 Object* result;
1480
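  // Objects too large for a paged space go to the large object space;
  // everything else goes to the old space chosen by Heap::TargetSpace()
  // (old pointer space or old data space, depending on the object type).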
1481 if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
1482 result = Heap::lo_space()->AllocateRawFixedArray(object_size);
1483 if (!result->IsFailure()) {
1484 HeapObject* target = HeapObject::cast(result);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001485 MigrateObject(target->address(), object->address(), object_size, true);
Leon Clarkef7060e22010-06-03 12:02:55 +01001486 MarkCompactCollector::tracer()->
1487 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001488 return true;
1489 }
1490 } else {
1491 OldSpace* target_space = Heap::TargetSpace(object);
1492
1493 ASSERT(target_space == Heap::old_pointer_space() ||
1494 target_space == Heap::old_data_space());
1495 result = target_space->AllocateRaw(object_size);
1496 if (!result->IsFailure()) {
1497 HeapObject* target = HeapObject::cast(result);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001498 MigrateObject(target->address(),
1499 object->address(),
1500 object_size,
1501 target_space == Heap::old_pointer_space());
Leon Clarkef7060e22010-06-03 12:02:55 +01001502 MarkCompactCollector::tracer()->
1503 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001504 return true;
1505 }
1506 }
1507
1508 return false;
1509}
1510
1511
1512static void SweepNewSpace(NewSpace* space) {
1513 Heap::CheckNewSpaceExpansionCriteria();
1514
1515 Address from_bottom = space->bottom();
1516 Address from_top = space->top();
1517
1518 // Flip the semispaces. After flipping, to space is empty, from space has
1519 // live objects.
1520 space->Flip();
1521 space->ResetAllocationInfo();
1522
1523 int size = 0;
1524 int survivors_size = 0;
1525
1526 // First pass: traverse all objects in inactive semispace, remove marks,
1527 // migrate live objects and write forwarding addresses.
1528 for (Address current = from_bottom; current < from_top; current += size) {
1529 HeapObject* object = HeapObject::FromAddress(current);
1530
1531 if (object->IsMarked()) {
1532 object->ClearMark();
1533 MarkCompactCollector::tracer()->decrement_marked_count();
1534
1535 size = object->Size();
1536 survivors_size += size;
1537
1538 // Aggressively promote young survivors to the old space.
1539 if (TryPromoteObject(object, size)) {
1540 continue;
1541 }
1542
      // Promotion failed. Just migrate the object to the other semispace.
Steve Block6ded16b2010-05-10 14:33:55 +01001544 Object* target = space->AllocateRaw(size);
1545
1546 // Allocation cannot fail at this point: semispaces are of equal size.
1547 ASSERT(!target->IsFailure());
1548
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001549 MigrateObject(HeapObject::cast(target)->address(),
1550 current,
1551 size,
1552 false);
Steve Block6ded16b2010-05-10 14:33:55 +01001553 } else {
1554 size = object->Size();
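      // Mark the dead object with a NULL forwarding address so that pointer
      // updates can recognize and clear stale references to it (see
      // UpdatePointerToNewGen).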
1555 Memory::Address_at(current) = NULL;
1556 }
1557 }
1558
1559 // Second pass: find pointers to new space and update them.
1560 PointersToNewGenUpdatingVisitor updating_visitor;
1561
1562 // Update pointers in to space.
Iain Merrick75681382010-08-19 15:07:18 +01001563 Address current = space->bottom();
1564 while (current < space->top()) {
1565 HeapObject* object = HeapObject::FromAddress(current);
1566 current +=
1567 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
1568 object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001569 }
Steve Block6ded16b2010-05-10 14:33:55 +01001570
1571 // Update roots.
1572 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
1573
1574 // Update pointers in old spaces.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001575 Heap::IterateDirtyRegions(Heap::old_pointer_space(),
1576 &Heap::IteratePointersInDirtyRegion,
1577 &UpdatePointerToNewGen,
1578 Heap::WATERMARK_SHOULD_BE_VALID);
1579
1580 Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
Steve Block6ded16b2010-05-10 14:33:55 +01001581
1582 // Update pointers from cells.
1583 HeapObjectIterator cell_iterator(Heap::cell_space());
1584 for (HeapObject* cell = cell_iterator.next();
1585 cell != NULL;
1586 cell = cell_iterator.next()) {
1587 if (cell->IsJSGlobalPropertyCell()) {
1588 Address value_address =
1589 reinterpret_cast<Address>(cell) +
1590 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1591 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1592 }
1593 }
1594
1595 // Update pointers from external string table.
1596 Heap::UpdateNewSpaceReferencesInExternalStringTable(
1597 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1598
1599 // All pointers were updated. Update auxiliary allocation info.
1600 Heap::IncrementYoungSurvivorsCounter(survivors_size);
1601 space->set_age_mark(space->top());
Steve Blocka7e24c12009-10-30 11:49:00 +00001602}
1603
1604
1605static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
1606 PageIterator it(space, PageIterator::PAGES_IN_USE);
Steve Block6ded16b2010-05-10 14:33:55 +01001607
  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).
1611
1612 // Page preceding current.
1613 Page* prev = Page::FromAddress(NULL);
1614
1615 // First empty page in a sequence.
1616 Page* first_empty_page = Page::FromAddress(NULL);
1617
1618 // Page preceding first empty page.
1619 Page* prec_first_empty_page = Page::FromAddress(NULL);
1620
  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
1627 Address last_free_start = NULL;
1628 int last_free_size = 0;
1629
Steve Blocka7e24c12009-10-30 11:49:00 +00001630 while (it.has_next()) {
1631 Page* p = it.next();
1632
1633 bool is_previous_alive = true;
1634 Address free_start = NULL;
1635 HeapObject* object;
1636
1637 for (Address current = p->ObjectAreaStart();
1638 current < p->AllocationTop();
1639 current += object->Size()) {
1640 object = HeapObject::FromAddress(current);
1641 if (object->IsMarked()) {
1642 object->ClearMark();
1643 MarkCompactCollector::tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01001644
Steve Blocka7e24c12009-10-30 11:49:00 +00001645 if (!is_previous_alive) { // Transition from free to live.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001646 dealloc(free_start,
1647 static_cast<int>(current - free_start),
1648 true,
1649 false);
Steve Blocka7e24c12009-10-30 11:49:00 +00001650 is_previous_alive = true;
1651 }
1652 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00001653 MarkCompactCollector::ReportDeleteIfNeeded(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001654 if (is_previous_alive) { // Transition from live to free.
1655 free_start = current;
1656 is_previous_alive = false;
1657 }
1658 }
1659 // The object is now unmarked for the call to Size() at the top of the
1660 // loop.
1661 }
1662
Steve Block6ded16b2010-05-10 14:33:55 +01001663 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
1664 || (!is_previous_alive && free_start == p->ObjectAreaStart());
1665
1666 if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
1669 if (!first_empty_page->is_valid()) {
1670 first_empty_page = p;
1671 prec_first_empty_page = prev;
1672 }
1673
1674 if (!is_previous_alive) {
1675 // There are dead objects on this page. Update space accounting stats
1676 // without putting anything into free list.
1677 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
1678 if (size_in_bytes > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001679 dealloc(free_start, size_in_bytes, false, true);
Steve Block6ded16b2010-05-10 14:33:55 +01001680 }
1681 }
1682 } else {
1683 // This page is not empty. Sequence of empty pages ended on the previous
1684 // one.
1685 if (first_empty_page->is_valid()) {
1686 space->FreePages(prec_first_empty_page, prev);
1687 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
1688 }
1689
      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
1692 if (last_free_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001693 Page::FromAddress(last_free_start)->
1694 SetAllocationWatermark(last_free_start);
1695 dealloc(last_free_start, last_free_size, true, true);
Steve Block6ded16b2010-05-10 14:33:55 +01001696 last_free_start = NULL;
1697 last_free_size = 0;
1698 }
1699
1700 // If the last region of this page was not live we remember it.
1701 if (!is_previous_alive) {
1702 ASSERT(last_free_size == 0);
1703 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
1704 last_free_start = free_start;
Steve Blocka7e24c12009-10-30 11:49:00 +00001705 }
1706 }
Steve Block6ded16b2010-05-10 14:33:55 +01001707
1708 prev = p;
1709 }
1710
1711 // We reached end of space. See if we need to adjust allocation top.
1712 Address new_allocation_top = NULL;
1713
1714 if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
1717 ASSERT(prev == space->AllocationTopPage());
1718
1719 new_allocation_top = first_empty_page->ObjectAreaStart();
1720 }
1721
1722 if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it on the free list and move the
    // allocation top to the beginning of this free area.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001726 dealloc(last_free_start, last_free_size, false, true);
Steve Block6ded16b2010-05-10 14:33:55 +01001727 new_allocation_top = last_free_start;
1728 }
1729
1730 if (new_allocation_top != NULL) {
1731#ifdef DEBUG
1732 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
1733 if (!first_empty_page->is_valid()) {
1734 ASSERT(new_allocation_top_page == space->AllocationTopPage());
1735 } else if (last_free_size > 0) {
1736 ASSERT(new_allocation_top_page == prec_first_empty_page);
1737 } else {
1738 ASSERT(new_allocation_top_page == first_empty_page);
1739 }
1740#endif
1741
1742 space->SetTop(new_allocation_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 }
1744}
1745
1746
1747void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
Steve Block6ded16b2010-05-10 14:33:55 +01001748 int size_in_bytes,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001749 bool add_to_freelist,
1750 bool last_on_page) {
Steve Block6ded16b2010-05-10 14:33:55 +01001751 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001752}
1753
1754
1755void MarkCompactCollector::DeallocateOldDataBlock(Address start,
Steve Block6ded16b2010-05-10 14:33:55 +01001756 int size_in_bytes,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001757 bool add_to_freelist,
1758 bool last_on_page) {
Steve Block6ded16b2010-05-10 14:33:55 +01001759 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001760}
1761
1762
1763void MarkCompactCollector::DeallocateCodeBlock(Address start,
Steve Block6ded16b2010-05-10 14:33:55 +01001764 int size_in_bytes,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001765 bool add_to_freelist,
1766 bool last_on_page) {
Steve Block6ded16b2010-05-10 14:33:55 +01001767 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001768}
1769
1770
1771void MarkCompactCollector::DeallocateMapBlock(Address start,
Steve Block6ded16b2010-05-10 14:33:55 +01001772 int size_in_bytes,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001773 bool add_to_freelist,
1774 bool last_on_page) {
Leon Clarkee46be812010-01-19 14:06:41 +00001775 // Objects in map space are assumed to have size Map::kSize and a
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 // valid map in their first word. Thus, we break the free block up into
1777 // chunks and free them separately.
1778 ASSERT(size_in_bytes % Map::kSize == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001779 Address end = start + size_in_bytes;
1780 for (Address a = start; a < end; a += Map::kSize) {
Steve Block6ded16b2010-05-10 14:33:55 +01001781 Heap::map_space()->Free(a, add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001782 }
1783}
1784
1785
1786void MarkCompactCollector::DeallocateCellBlock(Address start,
Steve Block6ded16b2010-05-10 14:33:55 +01001787 int size_in_bytes,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001788 bool add_to_freelist,
1789 bool last_on_page) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001790 // Free-list elements in cell space are assumed to have a fixed size.
1791 // We break the free block into chunks and add them to the free list
1792 // individually.
1793 int size = Heap::cell_space()->object_size_in_bytes();
1794 ASSERT(size_in_bytes % size == 0);
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 Address end = start + size_in_bytes;
1796 for (Address a = start; a < end; a += size) {
Steve Block6ded16b2010-05-10 14:33:55 +01001797 Heap::cell_space()->Free(a, add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001798 }
1799}
1800
1801
1802void MarkCompactCollector::EncodeForwardingAddresses() {
1803 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
1804 // Objects in the active semispace of the young generation may be
1805 // relocated to the inactive semispace (if not promoted). Set the
1806 // relocation info to the beginning of the inactive semispace.
1807 Heap::new_space()->MCResetRelocationInfo();
1808
1809 // Compute the forwarding pointers in each space.
1810 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00001811 ReportDeleteIfNeeded>(
Steve Blocka7e24c12009-10-30 11:49:00 +00001812 Heap::old_pointer_space());
1813
1814 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
1815 IgnoreNonLiveObject>(
1816 Heap::old_data_space());
1817
1818 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00001819 ReportDeleteIfNeeded>(
Steve Blocka7e24c12009-10-30 11:49:00 +00001820 Heap::code_space());
1821
1822 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
1823 IgnoreNonLiveObject>(
1824 Heap::cell_space());
1825
1826
1827 // Compute new space next to last after the old and code spaces have been
1828 // compacted. Objects in new space can be promoted to old or code space.
1829 EncodeForwardingAddressesInNewSpace();
1830
1831 // Compute map space last because computing forwarding addresses
1832 // overwrites non-live objects. Objects in the other spaces rely on
1833 // non-live map pointers to get the sizes of non-live objects.
1834 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
1835 IgnoreNonLiveObject>(
1836 Heap::map_space());
1837
1838 // Write relocation info to the top page, so we can use it later. This is
1839 // done after promoting objects from the new space so we get the correct
1840 // allocation top.
1841 Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
1842 Heap::old_data_space()->MCWriteRelocationInfoToPage();
1843 Heap::code_space()->MCWriteRelocationInfoToPage();
1844 Heap::map_space()->MCWriteRelocationInfoToPage();
1845 Heap::cell_space()->MCWriteRelocationInfoToPage();
1846}
1847
1848
Leon Clarkee46be812010-01-19 14:06:41 +00001849class MapIterator : public HeapObjectIterator {
1850 public:
1851 MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
1852
1853 explicit MapIterator(Address start)
1854 : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
1855
1856 private:
1857 static int SizeCallback(HeapObject* unused) {
1858 USE(unused);
1859 return Map::kSize;
1860 }
1861};
1862
1863
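// Helper that compacts the map space in place: live maps located above the
// compaction boundary are copied into vacant map-sized slots below it, and
// each evacuated map records its new location via an overflow-tagged map
// word so that map pointers can be rewritten afterwards.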
1864class MapCompact {
1865 public:
1866 explicit MapCompact(int live_maps)
1867 : live_maps_(live_maps),
1868 to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
1869 map_to_evacuate_it_(to_evacuate_start_),
1870 first_map_to_evacuate_(
1871 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
1872 }
1873
1874 void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
1877 for (Map* next_vacant_map = NextVacantMap();
1878 next_vacant_map;
1879 next_vacant_map = NextVacantMap()) {
1880 EvacuateMap(next_vacant_map, NextMapToEvacuate());
1881 }
1882
1883#ifdef DEBUG
1884 CheckNoMapsToEvacuate();
1885#endif
1886 }
1887
1888 void UpdateMapPointersInRoots() {
1889 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
1890 GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
1891 }
1892
Leon Clarkee46be812010-01-19 14:06:41 +00001893 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
1894 ASSERT(space != Heap::map_space());
1895
1896 PageIterator it(space, PageIterator::PAGES_IN_USE);
1897 while (it.has_next()) {
1898 Page* p = it.next();
1899 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
1900 }
1901 }
1902
1903 void UpdateMapPointersInNewSpace() {
1904 NewSpace* space = Heap::new_space();
1905 UpdateMapPointersInRange(space->bottom(), space->top());
1906 }
1907
1908 void UpdateMapPointersInLargeObjectSpace() {
1909 LargeObjectIterator it(Heap::lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00001910 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
1911 UpdateMapPointersInObject(obj);
Leon Clarkee46be812010-01-19 14:06:41 +00001912 }
1913
1914 void Finish() {
1915 Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
1916 }
1917
1918 private:
1919 int live_maps_;
1920 Address to_evacuate_start_;
1921 MapIterator vacant_map_it_;
1922 MapIterator map_to_evacuate_it_;
1923 Map* first_map_to_evacuate_;
1924
1925 // Helper class for updating map pointers in HeapObjects.
1926 class MapUpdatingVisitor: public ObjectVisitor {
1927 public:
1928 void VisitPointer(Object** p) {
1929 UpdateMapPointer(p);
1930 }
1931
1932 void VisitPointers(Object** start, Object** end) {
1933 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
1934 }
1935
1936 private:
1937 void UpdateMapPointer(Object** p) {
1938 if (!(*p)->IsHeapObject()) return;
1939 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
1940
      // Moved maps are tagged with an overflowed map word. They are the only
      // objects whose map word is overflowed, as marking is already complete.
1943 MapWord map_word = old_map->map_word();
1944 if (!map_word.IsOverflowed()) return;
1945
1946 *p = GetForwardedMap(map_word);
1947 }
1948 };
1949
1950 static MapUpdatingVisitor map_updating_visitor_;
1951
1952 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
1953 while (true) {
Leon Clarkee46be812010-01-19 14:06:41 +00001954 HeapObject* next = it->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00001955 ASSERT(next != NULL);
Leon Clarkee46be812010-01-19 14:06:41 +00001956 if (next == last)
1957 return NULL;
1958 ASSERT(!next->IsOverflowed());
1959 ASSERT(!next->IsMarked());
1960 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
1961 if (next->IsMap() == live)
1962 return reinterpret_cast<Map*>(next);
1963 }
1964 }
1965
1966 Map* NextVacantMap() {
1967 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
1968 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
1969 return map;
1970 }
1971
1972 Map* NextMapToEvacuate() {
1973 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
1974 ASSERT(map != NULL);
1975 ASSERT(map->IsMap());
1976 return map;
1977 }
1978
1979 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
1980 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
1981 ASSERT(map_to_evacuate->IsMap());
1982
Steve Block6ded16b2010-05-10 14:33:55 +01001983 ASSERT(Map::kSize % 4 == 0);
1984
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001985 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
1986 map_to_evacuate->address(),
1987 Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01001988
Leon Clarkee46be812010-01-19 14:06:41 +00001989 ASSERT(vacant_map->IsMap()); // Due to memcpy above.
1990
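    // Record the new location in the evacuated map's map word, tagged with
    // the overflow bit so MapUpdatingVisitor can tell forwarded maps apart
    // from ordinary ones when it rewrites map pointers.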
1991 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
1992 forwarding_map_word.SetOverflow();
1993 map_to_evacuate->set_map_word(forwarding_map_word);
1994
1995 ASSERT(map_to_evacuate->map_word().IsOverflowed());
1996 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
1997 }
1998
1999 static Map* GetForwardedMap(MapWord map_word) {
2000 ASSERT(map_word.IsOverflowed());
2001 map_word.ClearOverflow();
2002 Map* new_map = map_word.ToMap();
2003 ASSERT_MAP_ALIGNED(new_map->address());
2004 return new_map;
2005 }
2006
2007 static int UpdateMapPointersInObject(HeapObject* obj) {
2008 ASSERT(!obj->IsMarked());
2009 Map* map = obj->map();
2010 ASSERT(Heap::map_space()->Contains(map));
2011 MapWord map_word = map->map_word();
2012 ASSERT(!map_word.IsMarked());
2013 if (map_word.IsOverflowed()) {
2014 Map* new_map = GetForwardedMap(map_word);
2015 ASSERT(Heap::map_space()->Contains(new_map));
2016 obj->set_map(new_map);
2017
2018#ifdef DEBUG
2019 if (FLAG_gc_verbose) {
2020 PrintF("update %p : %p -> %p\n", obj->address(),
2021 map, new_map);
2022 }
2023#endif
2024 }
2025
2026 int size = obj->SizeFromMap(map);
2027 obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
2028 return size;
2029 }
2030
2031 static void UpdateMapPointersInRange(Address start, Address end) {
2032 HeapObject* object;
2033 int size;
2034 for (Address current = start; current < end; current += size) {
2035 object = HeapObject::FromAddress(current);
2036 size = UpdateMapPointersInObject(object);
2037 ASSERT(size > 0);
2038 }
2039 }
2040
2041#ifdef DEBUG
2042 void CheckNoMapsToEvacuate() {
2043 if (!FLAG_enable_slow_asserts)
2044 return;
2045
Leon Clarked91b9f72010-01-27 17:25:45 +00002046 for (HeapObject* obj = map_to_evacuate_it_.next();
2047 obj != NULL; obj = map_to_evacuate_it_.next())
2048 ASSERT(FreeListNode::IsFreeListNode(obj));
Leon Clarkee46be812010-01-19 14:06:41 +00002049 }
2050#endif
2051};
2052
2053MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
2054
2055
Steve Blocka7e24c12009-10-30 11:49:00 +00002056void MarkCompactCollector::SweepSpaces() {
Leon Clarkef7060e22010-06-03 12:02:55 +01002057 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2058
Steve Blocka7e24c12009-10-30 11:49:00 +00002059 ASSERT(state_ == SWEEP_SPACES);
2060 ASSERT(!IsCompacting());
2061 // Noncompacting collections simply sweep the spaces to clear the mark
2062 // bits and free the nonlive blocks (for old and map spaces). We sweep
2063 // the map space last because freeing non-live maps overwrites them and
2064 // the other spaces rely on possibly non-live maps to get the sizes for
2065 // non-live objects.
2066 SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
2067 SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
2068 SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
2069 SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
Iain Merrick75681382010-08-19 15:07:18 +01002070 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2071 SweepNewSpace(Heap::new_space());
2072 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002073 SweepSpace(Heap::map_space(), &DeallocateMapBlock);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002074
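  // Update pointers from surviving maps to objects that SweepNewSpace moved;
  // the corresponding dirty-region passes for the old pointer space and the
  // large object space already ran inside SweepNewSpace.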
2075 Heap::IterateDirtyRegions(Heap::map_space(),
2076 &Heap::IteratePointersInDirtyMapsRegion,
2077 &UpdatePointerToNewGen,
2078 Heap::WATERMARK_SHOULD_BE_VALID);
2079
Steve Block6ded16b2010-05-10 14:33:55 +01002080 int live_maps_size = Heap::map_space()->Size();
2081 int live_maps = live_maps_size / Map::kSize;
2082 ASSERT(live_map_objects_size_ == live_maps_size);
Leon Clarkee46be812010-01-19 14:06:41 +00002083
2084 if (Heap::map_space()->NeedsCompaction(live_maps)) {
2085 MapCompact map_compact(live_maps);
2086
2087 map_compact.CompactMaps();
2088 map_compact.UpdateMapPointersInRoots();
2089
Leon Clarkee46be812010-01-19 14:06:41 +00002090 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00002091 for (PagedSpace* space = spaces.next();
2092 space != NULL; space = spaces.next()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002093 if (space == Heap::map_space()) continue;
2094 map_compact.UpdateMapPointersInPagedSpace(space);
2095 }
2096 map_compact.UpdateMapPointersInNewSpace();
2097 map_compact.UpdateMapPointersInLargeObjectSpace();
2098
2099 map_compact.Finish();
2100 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002101}
2102
2103
// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace). Free regions within the range are encoded in place (see
// kSingleFreeEncoding and kMultiFreeEncoding) and are skipped. The callback
// function is used to get the size of each live object.
2109int MarkCompactCollector::IterateLiveObjectsInRange(
2110 Address start,
2111 Address end,
2112 HeapObjectCallback size_func) {
Steve Block6ded16b2010-05-10 14:33:55 +01002113 int live_objects_size = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002114 Address current = start;
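  // Free regions are encoded in place: a single free word is tagged with
  // kSingleFreeEncoding, while a larger free block is tagged with
  // kMultiFreeEncoding and stores its total size at current + kIntSize.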
2115 while (current < end) {
2116 uint32_t encoded_map = Memory::uint32_at(current);
2117 if (encoded_map == kSingleFreeEncoding) {
2118 current += kPointerSize;
2119 } else if (encoded_map == kMultiFreeEncoding) {
2120 current += Memory::int_at(current + kIntSize);
2121 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002122 int size = size_func(HeapObject::FromAddress(current));
2123 current += size;
2124 live_objects_size += size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002125 }
2126 }
Steve Block6ded16b2010-05-10 14:33:55 +01002127 return live_objects_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002128}
2129
2130
2131int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
2132 HeapObjectCallback size_f) {
2133 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2134 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2135}
2136
2137
2138int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
2139 HeapObjectCallback size_f) {
2140 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2141 int total = 0;
2142 PageIterator it(space, PageIterator::PAGES_IN_USE);
2143 while (it.has_next()) {
2144 Page* p = it.next();
2145 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2146 p->AllocationTop(),
2147 size_f);
2148 }
2149 return total;
2150}
2151
2152
2153// -------------------------------------------------------------------------
2154// Phase 3: Update pointers
2155
2156// Helper class for updating pointers in HeapObjects.
2157class UpdatingVisitor: public ObjectVisitor {
2158 public:
2159 void VisitPointer(Object** p) {
2160 UpdatePointer(p);
2161 }
2162
2163 void VisitPointers(Object** start, Object** end) {
2164 // Mark all HeapObject pointers in [start, end)
2165 for (Object** p = start; p < end; p++) UpdatePointer(p);
2166 }
2167
2168 void VisitCodeTarget(RelocInfo* rinfo) {
2169 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2170 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2171 VisitPointer(&target);
2172 rinfo->set_target_address(
2173 reinterpret_cast<Code*>(target)->instruction_start());
2174 }
2175
Steve Block3ce2e202009-11-05 08:53:23 +00002176 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002177 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2178 rinfo->IsPatchedReturnSequence()) ||
2179 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2180 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block3ce2e202009-11-05 08:53:23 +00002181 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2182 VisitPointer(&target);
2183 rinfo->set_call_address(
2184 reinterpret_cast<Code*>(target)->instruction_start());
2185 }
2186
Steve Blocka7e24c12009-10-30 11:49:00 +00002187 private:
2188 void UpdatePointer(Object** p) {
2189 if (!(*p)->IsHeapObject()) return;
2190
2191 HeapObject* obj = HeapObject::cast(*p);
2192 Address old_addr = obj->address();
2193 Address new_addr;
2194 ASSERT(!Heap::InFromSpace(obj));
2195
2196 if (Heap::new_space()->Contains(obj)) {
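      // During a compacting collection, forwarding addresses of new space
      // objects are stashed in the inactive (from) semispace at the same
      // offset as the object (see EncodeForwardingAddressesInNewSpace).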
2197 Address forwarding_pointer_addr =
2198 Heap::new_space()->FromSpaceLow() +
2199 Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
2200 new_addr = Memory::Address_at(forwarding_pointer_addr);
2201
2202#ifdef DEBUG
2203 ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
2204 Heap::old_data_space()->Contains(new_addr) ||
2205 Heap::new_space()->FromSpaceContains(new_addr) ||
2206 Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2207
2208 if (Heap::new_space()->FromSpaceContains(new_addr)) {
2209 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2210 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2211 }
2212#endif
2213
2214 } else if (Heap::lo_space()->Contains(obj)) {
2215 // Don't move objects in the large object space.
2216 return;
2217
2218 } else {
2219#ifdef DEBUG
2220 PagedSpaces spaces;
2221 PagedSpace* original_space = spaces.next();
2222 while (original_space != NULL) {
2223 if (original_space->Contains(obj)) break;
2224 original_space = spaces.next();
2225 }
2226 ASSERT(original_space != NULL);
2227#endif
2228 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2229 ASSERT(original_space->Contains(new_addr));
2230 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2231 original_space->MCSpaceOffsetForAddress(old_addr));
2232 }
2233
2234 *p = HeapObject::FromAddress(new_addr);
2235
2236#ifdef DEBUG
2237 if (FLAG_gc_verbose) {
2238 PrintF("update %p : %p -> %p\n",
2239 reinterpret_cast<Address>(p), old_addr, new_addr);
2240 }
2241#endif
2242 }
2243};
2244
2245
2246void MarkCompactCollector::UpdatePointers() {
2247#ifdef DEBUG
2248 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2249 state_ = UPDATE_POINTERS;
2250#endif
2251 UpdatingVisitor updating_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00002252 Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00002253 GlobalHandles::IterateWeakRoots(&updating_visitor);
2254
Steve Block6ded16b2010-05-10 14:33:55 +01002255 int live_maps_size = IterateLiveObjects(Heap::map_space(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002256 &UpdatePointersInOldObject);
Steve Block6ded16b2010-05-10 14:33:55 +01002257 int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2258 &UpdatePointersInOldObject);
2259 int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2260 &UpdatePointersInOldObject);
2261 int live_codes_size = IterateLiveObjects(Heap::code_space(),
2262 &UpdatePointersInOldObject);
2263 int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2264 &UpdatePointersInOldObject);
2265 int live_news_size = IterateLiveObjects(Heap::new_space(),
2266 &UpdatePointersInNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002267
  // Large objects do not move, so the map word can be updated directly.
2269 LargeObjectIterator it(Heap::lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00002270 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2271 UpdatePointersInNewObject(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002272
Steve Block6ded16b2010-05-10 14:33:55 +01002273 USE(live_maps_size);
2274 USE(live_pointer_olds_size);
2275 USE(live_data_olds_size);
2276 USE(live_codes_size);
2277 USE(live_cells_size);
2278 USE(live_news_size);
2279 ASSERT(live_maps_size == live_map_objects_size_);
2280 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2281 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2282 ASSERT(live_codes_size == live_code_objects_size_);
2283 ASSERT(live_cells_size == live_cell_objects_size_);
2284 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002285}
2286
2287
2288int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
2289 // Keep old map pointers
2290 Map* old_map = obj->map();
2291 ASSERT(old_map->IsHeapObject());
2292
2293 Address forwarded = GetForwardingAddressInOldSpace(old_map);
2294
2295 ASSERT(Heap::map_space()->Contains(old_map));
2296 ASSERT(Heap::map_space()->Contains(forwarded));
2297#ifdef DEBUG
2298 if (FLAG_gc_verbose) {
2299 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
2300 forwarded);
2301 }
2302#endif
2303 // Update the map pointer.
2304 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
2305
2306 // We have to compute the object size relying on the old map because
2307 // map objects are not relocated yet.
2308 int obj_size = obj->SizeFromMap(old_map);
2309
2310 // Update pointers in the object body.
2311 UpdatingVisitor updating_visitor;
2312 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
2313 return obj_size;
2314}
2315
2316
2317int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
2318 // Decode the map pointer.
2319 MapWord encoding = obj->map_word();
2320 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2321 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2322
2323 // At this point, the first word of map_addr is also encoded, cannot
2324 // cast it to Map* using Map::cast.
2325 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
2326 int obj_size = obj->SizeFromMap(map);
2327 InstanceType type = map->instance_type();
2328
2329 // Update map pointer.
2330 Address new_map_addr = GetForwardingAddressInOldSpace(map);
2331 int offset = encoding.DecodeOffset();
2332 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
2333
2334#ifdef DEBUG
2335 if (FLAG_gc_verbose) {
2336 PrintF("update %p : %p -> %p\n", obj->address(),
2337 map_addr, new_map_addr);
2338 }
2339#endif
2340
2341 // Update pointers in the object body.
2342 UpdatingVisitor updating_visitor;
2343 obj->IterateBody(type, obj_size, &updating_visitor);
2344 return obj_size;
2345}
2346
2347
2348Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
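  // The encoded map word stores an offset that, added to the forwarding
  // address of the first live object on this page (kept in the page's
  // mc_first_forwarded field), yields the object's new address. If the sum
  // runs past the allocation watermark of the target page, the remainder
  // spills over onto the next page (handled below).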
2350 MapWord encoding = obj->map_word();
2351
2352 // Offset to the first live object's forwarding address.
2353 int offset = encoding.DecodeOffset();
2354 Address obj_addr = obj->address();
2355
2356 // Find the first live object's forwarding address.
2357 Page* p = Page::FromAddress(obj_addr);
2358 Address first_forwarded = p->mc_first_forwarded;
2359
2360 // Page start address of forwarded address.
2361 Page* forwarded_page = Page::FromAddress(first_forwarded);
2362 int forwarded_offset = forwarded_page->Offset(first_forwarded);
2363
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002364 // Find end of allocation in the page of first_forwarded.
2365 int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
Steve Blocka7e24c12009-10-30 11:49:00 +00002366
2367 // Check if current object's forward pointer is in the same page
2368 // as the first live object's forwarding pointer
2369 if (forwarded_offset + offset < mc_top_offset) {
2370 // In the same page.
2371 return first_forwarded + offset;
2372 }
2373
2374 // Must be in the next page, NOTE: this may cross chunks.
2375 Page* next_page = forwarded_page->next_page();
2376 ASSERT(next_page->is_valid());
2377
2378 offset -= (mc_top_offset - forwarded_offset);
2379 offset += Page::kObjectStartOffset;
2380
2381 ASSERT_PAGE_OFFSET(offset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002382 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
Steve Blocka7e24c12009-10-30 11:49:00 +00002383
2384 return next_page->OffsetToAddress(offset);
2385}
2386
2387
2388// -------------------------------------------------------------------------
2389// Phase 4: Relocate objects
2390
2391void MarkCompactCollector::RelocateObjects() {
2392#ifdef DEBUG
2393 ASSERT(state_ == UPDATE_POINTERS);
2394 state_ = RELOCATE_OBJECTS;
2395#endif
  // Relocate objects, always relocating map objects first. Relocating
  // objects in other spaces relies on map objects to get object sizes.
Steve Block6ded16b2010-05-10 14:33:55 +01002398 int live_maps_size = IterateLiveObjects(Heap::map_space(),
2399 &RelocateMapObject);
2400 int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2401 &RelocateOldPointerObject);
2402 int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2403 &RelocateOldDataObject);
2404 int live_codes_size = IterateLiveObjects(Heap::code_space(),
2405 &RelocateCodeObject);
2406 int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2407 &RelocateCellObject);
2408 int live_news_size = IterateLiveObjects(Heap::new_space(),
2409 &RelocateNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002410
Steve Block6ded16b2010-05-10 14:33:55 +01002411 USE(live_maps_size);
2412 USE(live_pointer_olds_size);
2413 USE(live_data_olds_size);
2414 USE(live_codes_size);
2415 USE(live_cells_size);
2416 USE(live_news_size);
2417 ASSERT(live_maps_size == live_map_objects_size_);
2418 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2419 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2420 ASSERT(live_codes_size == live_code_objects_size_);
2421 ASSERT(live_cells_size == live_cell_objects_size_);
2422 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002423
2424 // Flip from and to spaces
2425 Heap::new_space()->Flip();
2426
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002427 Heap::new_space()->MCCommitRelocationInfo();
2428
Steve Blocka7e24c12009-10-30 11:49:00 +00002429 // Set age_mark to bottom in to space
2430 Address mark = Heap::new_space()->bottom();
2431 Heap::new_space()->set_age_mark(mark);
2432
Steve Blocka7e24c12009-10-30 11:49:00 +00002433 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00002434 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
2435 space->MCCommitRelocationInfo();
Steve Block6ded16b2010-05-10 14:33:55 +01002436
2437 Heap::CheckNewSpaceExpansionCriteria();
2438 Heap::IncrementYoungSurvivorsCounter(live_news_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002439}
2440
2441
2442int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
2443 // Recover map pointer.
2444 MapWord encoding = obj->map_word();
2445 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2446 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2447
2448 // Get forwarding address before resetting map pointer
2449 Address new_addr = GetForwardingAddressInOldSpace(obj);
2450
2451 // Reset map pointer. The meta map object may not be copied yet so
2452 // Map::cast does not yet work.
2453 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
2454
2455 Address old_addr = obj->address();
2456
2457 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01002458 // Move contents.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002459 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2460 old_addr,
2461 Map::kSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00002462 }
2463
2464#ifdef DEBUG
2465 if (FLAG_gc_verbose) {
2466 PrintF("relocate %p -> %p\n", old_addr, new_addr);
2467 }
2468#endif
2469
2470 return Map::kSize;
2471}
2472
2473
2474static inline int RestoreMap(HeapObject* obj,
2475 PagedSpace* space,
2476 Address new_addr,
2477 Address map_addr) {
2478 // This must be a non-map object, and the function relies on the
2479 // assumption that the Map space is compacted before the other paged
2480 // spaces (see RelocateObjects).
2481
2482 // Reset map pointer.
2483 obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
2484
2485 int obj_size = obj->Size();
2486 ASSERT_OBJECT_SIZE(obj_size);
2487
2488 ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
2489 space->MCSpaceOffsetForAddress(obj->address()));
2490
2491#ifdef DEBUG
2492 if (FLAG_gc_verbose) {
2493 PrintF("relocate %p -> %p\n", obj->address(), new_addr);
2494 }
2495#endif
2496
2497 return obj_size;
2498}
2499
2500
2501int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
2502 PagedSpace* space) {
2503 // Recover map pointer.
2504 MapWord encoding = obj->map_word();
2505 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2506 ASSERT(Heap::map_space()->Contains(map_addr));
2507
2508 // Get forwarding address before resetting map pointer.
2509 Address new_addr = GetForwardingAddressInOldSpace(obj);
2510
2511 // Reset the map pointer.
2512 int obj_size = RestoreMap(obj, space, new_addr, map_addr);
2513
2514 Address old_addr = obj->address();
2515
2516 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01002517 // Move contents.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002518 if (space == Heap::old_data_space()) {
2519 Heap::MoveBlock(new_addr, old_addr, obj_size);
2520 } else {
2521 Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2522 old_addr,
2523 obj_size);
2524 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002525 }
2526
2527 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
2528
Leon Clarked91b9f72010-01-27 17:25:45 +00002529 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2530 if (copied_to->IsJSFunction()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002531 PROFILE(FunctionMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00002532 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002533 HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00002534
Steve Blocka7e24c12009-10-30 11:49:00 +00002535 return obj_size;
2536}
2537
2538
2539int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
2540 return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
2541}
2542
2543
2544int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
2545 return RelocateOldNonCodeObject(obj, Heap::old_data_space());
2546}
2547
2548
2549int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
2550 return RelocateOldNonCodeObject(obj, Heap::cell_space());
2551}
2552
2553
2554int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
2555 // Recover map pointer.
2556 MapWord encoding = obj->map_word();
2557 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2558 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2559
2560 // Get forwarding address before resetting map pointer
2561 Address new_addr = GetForwardingAddressInOldSpace(obj);
2562
2563 // Reset the map pointer.
2564 int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);
2565
2566 Address old_addr = obj->address();
2567
2568 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01002569 // Move contents.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002570 Heap::MoveBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002571 }
2572
2573 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2574 if (copied_to->IsCode()) {
2575 // May also update inline cache target.
2576 Code::cast(copied_to)->Relocate(new_addr - old_addr);
2577 // Notify the logger that compiled code has moved.
Steve Block6ded16b2010-05-10 14:33:55 +01002578 PROFILE(CodeMoveEvent(old_addr, new_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00002579 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002580 HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00002581
2582 return obj_size;
2583}
2584
2585
2586int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
2587 int obj_size = obj->Size();
2588
2589 // Get forwarding address
2590 Address old_addr = obj->address();
2591 int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
2592
2593 Address new_addr =
2594 Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);
2595
2596#ifdef DEBUG
2597 if (Heap::new_space()->FromSpaceContains(new_addr)) {
2598 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2599 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2600 } else {
2601 ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
2602 Heap::TargetSpace(obj) == Heap::old_data_space());
2603 }
2604#endif
2605
2606 // New and old addresses cannot overlap.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002607 if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
2608 Heap::CopyBlock(new_addr, old_addr, obj_size);
2609 } else {
2610 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
2611 old_addr,
2612 obj_size);
2613 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002614
2615#ifdef DEBUG
2616 if (FLAG_gc_verbose) {
2617 PrintF("relocate %p -> %p\n", old_addr, new_addr);
2618 }
2619#endif
2620
Leon Clarked91b9f72010-01-27 17:25:45 +00002621 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
2622 if (copied_to->IsJSFunction()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002623 PROFILE(FunctionMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00002624 }
Ben Murdoch3bec4d22010-07-22 14:51:16 +01002625 HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00002626
Steve Blocka7e24c12009-10-30 11:49:00 +00002627 return obj_size;
2628}
2629
2630
Leon Clarked91b9f72010-01-27 17:25:45 +00002631void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
2632#ifdef ENABLE_LOGGING_AND_PROFILING
2633 if (obj->IsCode()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002634 PROFILE(CodeDeleteEvent(obj->address()));
Leon Clarked91b9f72010-01-27 17:25:45 +00002635 } else if (obj->IsJSFunction()) {
Steve Block6ded16b2010-05-10 14:33:55 +01002636 PROFILE(FunctionDeleteEvent(obj->address()));
Leon Clarked91b9f72010-01-27 17:25:45 +00002637 }
2638#endif
2639}
2640
Iain Merrick75681382010-08-19 15:07:18 +01002641
2642void MarkCompactCollector::Initialize() {
2643 StaticPointersToNewGenUpdatingVisitor::Initialize();
2644 StaticMarkingVisitor::Initialize();
2645}
2646
2647
Steve Blocka7e24c12009-10-30 11:49:00 +00002648} } // namespace v8::internal