// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    Heap::MarkMapPointersAsEncoded(true);
    UpdatePointers();
    Heap::MarkMapPointersAsEncoded(false);
    PcToCodeCache::FlushPcToCodeCache();

    RelocateObjects();
  } else {
    SweepSpaces();
    PcToCodeCache::FlushPcToCodeCache();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
    compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;  // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}

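// Worked example (illustrative numbers only): with old_gen_used at 10 MB and
// old_gen_recoverable at 2 MB, fragmentation is (2 * 100.0) / 10 = 20
// percent. Since 20 exceeds kFragmentationLimit (15) and 2 MB exceeds
// kFragmentationAllowed (1 MB), compact_on_next_gc_ is set and the next
// collection compacts the old generation.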

// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

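// A minimal sketch (illustrative, not the V8 implementation) of the overflow
// protocol described above; Children(), SetMark(), and SetOverflow() are
// hypothetical helpers standing in for the real body iteration and mark bits:
//
//   void DrainStack(MarkingStack* stack) {
//     while (!stack->is_empty()) {
//       HeapObject* object = stack->Pop();
//       for (HeapObject* child : Children(object)) {  // Body pointers.
//         if (child->IsMarked()) continue;
//         child->SetMark();
//         if (stack->is_full()) {
//           child->SetOverflow();  // Marked, but its children not yet visited.
//         } else {
//           stack->Push(child);
//         }
//       }
//     }
//   }
//
// After the stack drains, the heap is rescanned for objects whose overflow
// bit is set; they are pushed again and the loop repeats until no object
// remains overflowed.
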
static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}

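// Example (illustrative): string concatenation can leave a cons string whose
// right part is the empty string, e.g. the pair ("foo", ""). For such a
// string the slot *p is updated in place to point directly at "foo", so the
// collector never traverses the degenerate cons cell again.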

class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void EnableCodeFlushing(bool enabled) {
    if (enabled) {
      table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
    } else {
      table_.Register(kVisitJSFunction, &VisitJSFunction);
    }
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitSharedFunctionInfo,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SharedFunctionInfo::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Object** p)) {
    MarkObjectByPointer(p);
  }

  INLINE(static void VisitPointers(Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  static inline void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  static inline void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Visit an unmarked object.
  static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  inline static bool HasSourceCode(SharedFunctionInfo* info) {
    Object* undefined = Heap::raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }

  inline static bool IsCompiled(JSFunction* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }

  static void FlushCodeForFunction(JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    if (shared_info->IsMarked()) return;

    // Special handling if the function and shared info objects
    // have different code objects.
    if (function->unchecked_code() != shared_info->unchecked_code()) {
      // If the shared function has been flushed but the function has not,
      // we flush the function if possible.
      if (!IsCompiled(shared_info) &&
          IsCompiled(function) &&
          !function->unchecked_code()->IsMarked()) {
        function->set_code(shared_info->unchecked_code());
      }
      return;
    }

    // Code is either on stack or in compilation cache.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return;

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return;
    }

    // Compute the lazy compilable version of the code.
    Code* code = Builtins::builtin(Builtins::LazyCompile);
    shared_info->set_code(code);
    function->set_code(code);
  }

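  // Example (illustrative): code reachable only through its
  // SharedFunctionInfo ages by one per full collection. With
  // kCodeAgeThreshold == 5, a function that is neither on the stack nor in
  // the compilation cache for five collections has both its JSFunction and
  // its SharedFunctionInfo reset to the LazyCompile builtin; calling it
  // later recompiles it from the retained source.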

  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }

  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }

  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    if (!(map == Heap::raw_unchecked_context_map() ||
          map == Heap::raw_unchecked_catch_context_map() ||
          map == Heap::raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }

  static void VisitCodeEntry(Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(&code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }

  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      FlushCodeForFunction(jsfunction);
    }
    VisitJSFunction(map, object);
  }

  static void VisitJSFunction(Map* map, HeapObject* object) {
#define SLOT_ADDR(obj, offset) \
    reinterpret_cast<Object**>((obj)->address() + offset)

    VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);

    VisitPointers(SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kSize));
#undef SLOT_ADDR
  }

  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(start, end);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitCodeTarget(rinfo);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitDebugTarget(rinfo);
  }
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  void VisitThread(ThreadLocalTop* top) {
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
    }
  }
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsHeapObject()) {
      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
    }
  }
};


void MarkCompactCollector::PrepareForCodeFlushing() {
  if (!FLAG_flush_code) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (Debug::IsLoaded() || Debug::has_break_points()) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }
#endif
  StaticMarkingVisitor::EnableCodeFlushing(true);

  // Ensure that the empty descriptor array is marked. MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  MarkObject(Heap::raw_unchecked_empty_descriptor_array());

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor;
  ThreadManager::IterateArchivedThreads(&code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor;
  CompilationCache::IterateFunctions(&visitor);

  ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack();
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }

 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say that
  // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
  // NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
  // Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack();
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);

    StaticMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
  EmptyMarkingStack();
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::ProcessObjectGroups() {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack();
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  PrepareForCodeFlushing();

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups();

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups();

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.

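// Sketch (illustrative, simplified) of how a forwarding address is later
// recovered from the offset-based encoding used in paged spaces;
// DecodeOffset() is a hypothetical stand-in for the MapWord decoding
// described in objects.h:
//
//   Address GetForwardingAddress(HeapObject* obj) {
//     MapWord encoding = obj->map_word();  // Map address + live-byte offset.
//     Page* page = Page::FromAddress(obj->address());
//     return page->mc_first_forwarded + encoding.DecodeOffset();
//   }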

// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}

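// Example (illustrative, assuming a 32-bit heap where one word is 4 bytes):
// a 4-byte hole is encoded as the single word kSingleFreeEncoding; a 24-byte
// hole is encoded as kMultiFreeEncoding in its first word and the size, 24,
// in its second word. Both encodings are invalid map addresses, so the
// sweeper can distinguish them from live objects.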

// Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the old data space, large objects to large object
// space, and all others to the old pointer space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (e.g., a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}


// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly
// into the first word of the object without any encoding. If the object is
// dead, NULL is written as the forwarding address.
// The second pass updates pointers to new space in all spaces. It is
// possible to encounter pointers to dead objects during traversal of dirty
// regions; we clear them so they are not visited again during the next
// dirty-regions iteration.
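//
// For example, after the first pass the from semispace might look like this
// (first word of each object shown):
//
//   A: [ addr of A's copy in to space ]   // survived, copied in new space
//   B: [ NULL ]                           // dead
//   C: [ addr of C's copy in old space ]  // survived, promoted
//
// The second pass then rewrites every pointer into from space by loading
// the first word of its target, clearing the pointer when that word is
// NULL. (Schematic only.)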
static void MigrateObject(Address dst,
                          Address src,
                          int size,
                          bool to_old_space) {
  if (to_old_space) {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
  } else {
    Heap::CopyBlock(dst, src, size);
  }

  Memory::Address_at(src) = dst;
}


class StaticPointersToNewGenUpdatingVisitor : public
    StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
 public:
  static inline void VisitPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (Heap::new_space()->Contains(obj)) {
      ASSERT(Heap::InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};
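

// (A note on the visitor split here: StaticPointersToNewGenUpdatingVisitor
// plugs into the templated static-visitor machinery from objects-visiting.h,
// which dispatches on the object's map without virtual calls, while the
// ObjectVisitor subclass below wraps it for the iterators that need the
// virtual interface. The performance rationale is an inference; the
// wrapping itself is visible in the code below.)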


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }
};


// Function for updating pointers from live objects in old spaces to new
// space. It can encounter pointers to dead objects in new space when
// traversing map space (see the comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
  if (!(*p)->IsHeapObject()) return;

  Address old_addr = (*p)->address();
  ASSERT(Heap::InFromSpace(*p));

  Address new_addr = Memory::Address_at(old_addr);

  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next iteration of dirty regions.
    *p = NULL;
  } else {
    *p = HeapObject::FromAddress(new_addr);
  }
}


static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  Address old_addr = HeapObject::cast(*p)->address();
  Address new_addr = Memory::Address_at(old_addr);
  return String::cast(HeapObject::FromAddress(new_addr));
}


static bool TryPromoteObject(HeapObject* object, int object_size) {
  Object* result;

  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(), object->address(), object_size, true);
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);

    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space == Heap::old_pointer_space());
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}
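

// (Promotion policy in brief: objects too large for a paged space are
// allocated in the large object space; everything else goes to the old
// space chosen by Heap::TargetSpace, roughly old_pointer_space for objects
// that may contain pointers and old_data_space otherwise. If allocation in
// the target space fails, the caller falls back to copying the object
// within new space, as SweepNewSpace does below.)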


static void SweepNewSpace(NewSpace* space) {
  Heap::CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces. After flipping, to space is empty and from space
  // contains the live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in the inactive semispace, remove
  // marks, migrate live objects and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate the object to the other semispace.
      Object* target = space->AllocateRaw(size);

      // Allocation cannot fail at this point: semispaces are of equal size.
      ASSERT(!target->IsFailure());

      MigrateObject(HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor;

  // Update pointers in to space.
  Address current = space->bottom();
  while (current < space->top()) {
    HeapObject* object = HeapObject::FromAddress(current);
    current +=
        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
                                                           object);
  }

  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(Heap::cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointers from the external string table.
  Heap::UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
}


static void SweepSpace(PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
  Address last_free_start = NULL;
  int last_free_size = 0;

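  // For example, with pages P1..P5 in use where P2 and P3 contain no live
  // objects and P5 ends in dead objects, the loop below releases [P2, P3]
  // wholesale via FreePages once it sees live data on P4, and P5's trailing
  // dead area is remembered in last_free_start/last_free_size so the
  // allocation top can simply be moved back afterwards.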
  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();

        if (!is_previous_alive) {  // Transition from free to live.
          space->DeallocateBlock(free_start,
                                 static_cast<int>(current - free_start),
                                 true);
          is_previous_alive = true;
        }
      } else {
        MarkCompactCollector::ReportDeleteIfNeeded(object);
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page. Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          space->DeallocateBlock(free_start, size_in_bytes, false);
        }
      }
    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        space->DeallocateBlock(last_free_start, last_free_size, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space. See if we need to adjust the
  // allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages of the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
    space->DeallocateBlock(last_free_start, last_free_size, false);
    new_allocation_top = last_free_start;
  }

  if (new_allocation_top != NULL) {
#ifdef DEBUG
    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
    if (!first_empty_page->is_valid()) {
      ASSERT(new_allocation_top_page == space->AllocationTopPage());
    } else if (last_free_size > 0) {
      ASSERT(new_allocation_top_page == prec_first_empty_page);
    } else {
      ASSERT(new_allocation_top_page == first_empty_page);
    }
#endif

    space->SetTop(new_allocation_top);
  }
}


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute new space second to last, after the old and code spaces have
  // been compacted. Objects in new space can be promoted to old or code
  // space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
      : live_maps_(live_maps),
        to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
        map_to_evacuate_it_(to_evacuate_start_),
        first_map_to_evacuate_(
            reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }

  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word. They are the
      // only objects whose map word is overflowed, as marking is already
      // complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
                                                  map_to_evacuate->address(),
                                                  Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to the memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }
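
  // To illustrate the forwarding scheme: after EvacuateMap copies a map M
  // into a vacant slot V near the start of the space, M's map word is
  // replaced by MapWord::FromMap(V) with the overflow bit set. The overflow
  // bit is available for this because marking is already complete, so
  // MapUpdatingVisitor can read "overflowed map word" as "this map has
  // moved; follow the encoded address".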

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space());
  SweepSpace(Heap::old_data_space());
  SweepSpace(Heap::code_space());
  SweepSpace(Heap::cell_space());
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(Heap::new_space());
  }
  SweepSpace(Heap::map_space());

  Heap::IterateDirtyRegions(Heap::map_space(),
                            &Heap::IteratePointersInDirtyMapsRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  int live_maps_size = Heap::map_space()->Size();
  int live_maps = live_maps_size / Map::kSize;
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace), skipping the encoded free regions, and return their total
// size. The callback function is used to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = size_func(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
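

// To make the free-region skipping concrete: if a swept page contains a
// live 24-byte object, then a multi-word dead region, then another live
// object, the loop above advances 24 bytes past the first object, reads
// kMultiFreeEncoding and jumps by the byte count stored at
// current + kIntSize, and resumes with the next live object. (Sizes here
// are illustrative only.)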


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &UpdatePointersInNewObject);

  // Large objects do not move, so the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    UpdatePointersInNewObject(obj);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, so we
  // cannot cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of the forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page. NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
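

// A worked example of the page-spill arithmetic above (numbers are
// illustrative): suppose the object's encoded offset is 0x3000, the first
// forwarded address sits at offset 0x800 in its page, and that page's
// allocation watermark offset is 0x2c00. Since 0x800 + 0x3000 >= 0x2c00,
// the forwarding address is on the next page, at
// 0x3000 - (0x2c00 - 0x800) = 0xc00 bytes past Page::kObjectStartOffset.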


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects; always relocate map objects first, since relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &RelocateCellObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces.
  Heap::new_space()->Flip();

  Heap::new_space()->MCCommitRelocationInfo();

  // Set age_mark to bottom in to space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  Heap::CheckNewSpaceExpansionCriteria();
  Heap::IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer. The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset the map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get the forwarding address before resetting the map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update the inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get the forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    PROFILE(FunctionDeleteEvent(obj->address()));
  }
#endif
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} } // namespace v8::internal