// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    Heap::MarkMapPointersAsEncoded(true);
    UpdatePointers();
    Heap::MarkMapPointersAsEncoded(false);
    PcToCodeCache::FlushPcToCodeCache();

    RelocateObjects();
  } else {
    SweepSpaces();
    PcToCodeCache::FlushPcToCodeCache();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}
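
// A worked example of the heuristic above (numbers invented, not measured):
// with old_gen_used at 10 MB and old_gen_recoverable at 2 MB, fragmentation
// is static_cast<int>((2 * 100.0) / 10) = 20. Since 20 > kFragmentationLimit
// and 2 MB > kFragmentationAllowed, compact_on_next_gc_ is set and the next
// collection will compact.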


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
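//
// A compact sketch of that protocol (illustrative pseudocode only; the
// actual entry points are MarkObject, EmptyMarkingStack, RefillMarkingStack
// and ProcessMarkingStack below):
//
//   Mark(obj):
//     set the mark bit in obj's map word
//     if the marking stack is not full: push obj
//     else: also set obj's overflow bit and flag the stack as overflowed
//
//   Drain():
//     EmptyMarkingStack()    // pop objects, Mark() their children
//     while the stack is flagged as overflowed:
//       clear the flag; scan the heap for overflow bits, clearing them
//       and pushing those objects; then EmptyMarkingStack() again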

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
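
// For example (hypothetical contents): flattening typically leaves a cons
// string of the shape ConsString("contents", empty_string). When marking
// visits a slot holding such a string, the code above redirects the slot
// straight to "contents", so the spent wrapper is never marked and gets
// reclaimed by this collection.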


class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void EnableCodeFlushing(bool enabled) {
    if (enabled) {
      table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
    } else {
      table_.Register(kVisitJSFunction, &VisitJSFunction);
    }
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);


    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitSharedFunctionInfo,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SharedFunctionInfo::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Object** p)) {
    MarkObjectByPointer(p);
  }

  INLINE(static void VisitPointers(Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  static inline void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  static inline void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Visit an unmarked object.
  static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  inline static bool HasSourceCode(SharedFunctionInfo* info) {
    Object* undefined = Heap::raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  static void FlushCodeForFunction(JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    if (shared_info->IsMarked()) return;

    // Special handling if the function and shared info objects
    // have different code objects.
    if (function->unchecked_code() != shared_info->unchecked_code()) {
      // If the shared function has been flushed but the function has not,
      // we flush the function if possible.
      if (!IsCompiled(shared_info) &&
          IsCompiled(function) &&
          !function->unchecked_code()->IsMarked()) {
        function->set_code(shared_info->unchecked_code());
      }
      return;
    }

    // Code is either on stack or in compilation cache.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return;

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return;
    }

    // Compute the lazy compilable version of the code.
    Code* code = Builtins::builtin(Builtins::LazyCompile);
    shared_info->set_code(code);
    function->set_code(code);
  }
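
  // Summarizing the checks above (a restatement, not additional behavior):
  // code is flushed only when it is not otherwise reachable, the source is
  // still available for lazy recompilation, the function is not an API
  // function, its code has kind FUNCTION, it allows lazy compilation, it is
  // not a toplevel script wrapper, and it has survived kCodeAgeThreshold
  // collections without its code being marked.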


  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }


  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }


  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    if (!(map == Heap::raw_unchecked_context_map() ||
          map == Heap::raw_unchecked_catch_context_map() ||
          map == Heap::raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }


  static void VisitCodeEntry(Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(&code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }


  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      FlushCodeForFunction(jsfunction);
    }
    VisitJSFunction(map, object);
  }


  static void VisitJSFunction(Map* map, HeapObject* object) {
#define SLOT_ADDR(obj, offset) \
    reinterpret_cast<Object**>((obj)->address() + offset)

    VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);

    VisitPointers(SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kSize));
#undef SLOT_ADDR
  }


  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(start, end);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitCodeTarget(rinfo);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitDebugTarget(rinfo);
  }
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  void VisitThread(ThreadLocalTop* top) {
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
    }
  }
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsHeapObject()) {
      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
    }
  }
};


void MarkCompactCollector::PrepareForCodeFlushing() {
  if (!FLAG_flush_code) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (Debug::IsLoaded() || Debug::has_break_points()) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }
#endif
  StaticMarkingVisitor::EnableCodeFlushing(true);

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor;
  ThreadManager::IterateArchivedThreads(&code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor;
  CompilationCache::IterateFunctions(&visitor);

  MarkCompactCollector::ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack();
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say that
  // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
  // NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
  // Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer, it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack();
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);

    StaticMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
  EmptyMarkingStack();
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::ProcessObjectGroups() {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack();
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  PrepareForCodeFlushing();

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups();

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups();

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}


static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
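//
// As a concrete sketch of the free-region scheme (layout only; the constant
// values live in MarkCompactCollector::kSingleFreeEncoding and
// kMultiFreeEncoding): a one-word hole is encoded as
//
//   [kSingleFreeEncoding]              // the next word starts an object
//
// while, say, a 24-byte hole on a 32-bit heap is encoded as
//
//   [kMultiFreeEncoding][24][... remaining words, zapped in debug builds ...]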


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}


// Try to promote all objects in new space. Objects without pointers (heap
// numbers and sequential strings) are promoted to the old data space, all
// others to the old pointer space; objects too large for a paged space are
// kept in new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}
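

// For instance (addresses invented for illustration): with to-space at
// 0x1000 and from-space at 0x9000, a live object at 0x1230 has to-space
// offset 0x230, so its new address is recorded at 0x9230. The object itself
// is untouched; from-space is available for this because it doubled as the
// marking stack and marking is finished.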


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}
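

// A worked example (sizes invented): for three live objects of 16, 24 and
// 32 bytes on a page, successive calls record offsets 0, 16 and 40 in the
// objects' map words, and the page's mc_first_forwarded field holds the new
// address of the first object. A later pass can thus reconstruct any
// object's destination as mc_first_forwarded plus its encoded offset.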


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (e.g., a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}
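

// To illustrate the transitions (a made-up page layout): sweeping the
// sequence live A, dead B, dead C, live D first encodes a forwarding
// address for A, then records free_start at B; on reaching D it encodes the
// free region spanning B and C into B's first word(s), then encodes D's
// forwarding address. A trailing dead run is encoded after the loop.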


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}
1401
1402
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly
// into the first word of the object, without any encoding. If an object is
// dead, NULL is written as its forwarding address.
// The second pass updates pointers to new space in all spaces. It is
// possible to encounter pointers to dead objects during traversal of dirty
// regions; we clear them to avoid encountering them again during the next
// dirty-regions iteration.
static void MigrateObject(Address dst,
                          Address src,
                          int size,
                          bool to_old_space) {
  if (to_old_space) {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
  } else {
    Heap::CopyBlock(dst, src, size);
  }

  Memory::Address_at(src) = dst;
}
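
// A minimal sketch of the forwarding scheme used above (illustrative, not
// part of the collector): once MigrateObject has run, the first word of the
// from-space copy holds the object's new address, so a later pass can do:
//
//   Address new_addr = Memory::Address_at(old_addr);
//
// SweepNewSpace stores NULL at the old address of a dead object instead, and
// UpdatePointerToNewGen below checks for that value.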


class StaticPointersToNewGenUpdatingVisitor : public
    StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
 public:
  static inline void VisitPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (Heap::new_space()->Contains(obj)) {
      ASSERT(Heap::InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
  if (!(*p)->IsHeapObject()) return;

  Address old_addr = (*p)->address();
  ASSERT(Heap::InFromSpace(*p));

  Address new_addr = Memory::Address_at(old_addr);

  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next iteration of dirty regions.
    *p = NULL;
  } else {
    *p = HeapObject::FromAddress(new_addr);
  }
}
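
// Example of why the NULL check matters (an assumed scenario, not from the
// source): a map-space object may retain a field pointing at a new-space
// object that died in this collection. Its first word was overwritten with
// NULL by SweepNewSpace, so the dirty-region traversal clears the field here
// rather than following a dangling forwarding address on a later iteration.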


static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  Address old_addr = HeapObject::cast(*p)->address();
  Address new_addr = Memory::Address_at(old_addr);
  return String::cast(HeapObject::FromAddress(new_addr));
}


static bool TryPromoteObject(HeapObject* object, int object_size) {
  Object* result;

  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(), object->address(), object_size, true);
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);

    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space == Heap::old_pointer_space());
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}


static void SweepNewSpace(NewSpace* space) {
  Heap::CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in the inactive semispace, remove
  // marks, migrate live objects and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate the object to the other semispace.
      Object* target = space->AllocateRaw(size);

      // Allocation cannot fail at this point: semispaces are of equal size.
      ASSERT(!target->IsFailure());

      MigrateObject(HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor;

  // Update pointers in to space.
  Address current = space->bottom();
  while (current < space->top()) {
    HeapObject* object = HeapObject::FromAddress(current);
    current +=
        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
                                                           object);
  }

  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(Heap::cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointers from the external string table.
  Heap::UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
}


static void SweepSpace(PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area into
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area into the free list.
  Address last_free_start = NULL;
  int last_free_size = 0;

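  // Illustrative sketch (hypothetical pages, not from the source): if page
  // P1 ends with a dead tail
  //   P1: [live][live][dead ... AllocationTop]
  // the tail is only recorded in last_free_start/last_free_size. If the next
  // page turns out to contain live objects, the tail is deallocated into the
  // free list; if every following page is empty, the allocation top is moved
  // back to last_free_start instead and the tail is never listed.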
  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();

        if (!is_previous_alive) {  // Transition from free to live.
          space->DeallocateBlock(free_start,
                                 static_cast<int>(current - free_start),
                                 true);
          is_previous_alive = true;
        }
      } else {
        MarkCompactCollector::ReportDeleteIfNeeded(object);
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page. Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          space->DeallocateBlock(free_start, size_in_bytes, false);
        }
      }
    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        space->DeallocateBlock(last_free_start, last_free_size, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space. See if we need to adjust the
  // allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
    space->DeallocateBlock(last_free_start, last_free_size, false);
    new_allocation_top = last_free_start;
  }

  if (new_allocation_top != NULL) {
#ifdef DEBUG
    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
    if (!first_empty_page->is_valid()) {
      ASSERT(new_allocation_top_page == space->AllocationTopPage());
    } else if (last_free_size > 0) {
      ASSERT(new_allocation_top_page == prec_first_empty_page);
    } else {
      ASSERT(new_allocation_top_page == first_empty_page);
    }
#endif

    space->SetTop(new_allocation_top);
  }
}


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute new space next to last after the old and code spaces have been
  // compacted. Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
    : live_maps_(live_maps),
      to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
      map_to_evacuate_it_(to_evacuate_start_),
      first_map_to_evacuate_(
          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }

  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word. They are the only
      // objects whose map word is overflowed, as marking is already complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
                                                  map_to_evacuate->address(),
                                                  Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }
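
  // Worked example (hypothetical addresses, for illustration only): if the
  // map at 0x2000 is evacuated into a vacant slot at 0x1000, the copy at
  // 0x1000 becomes the live map, and the dead map's map word is set to
  // 0x1000 with the overflow bit set. MapUpdatingVisitor then rewrites any
  // pointer to 0x2000 as a pointer to 0x1000 via GetForwardedMap.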

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space());
  SweepSpace(Heap::old_data_space());
  SweepSpace(Heap::code_space());
  SweepSpace(Heap::cell_space());
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(Heap::new_space());
  }
  SweepSpace(Heap::map_space());

  Heap::IterateDirtyRegions(Heap::map_space(),
                            &Heap::IteratePointersInDirtyMapsRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  int live_maps_size = Heap::map_space()->Size();
  int live_maps = live_maps_size / Map::kSize;
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (eg, a page or a
// semispace). Free regions in the range, encoded during sweeping, are
// skipped using the encodings written at their start addresses. The
// callback function is used to get the size of each live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = size_func(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
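
// Worked example of the skipping logic (hypothetical sizes, for illustration
// only): a one-word hole is tagged kSingleFreeEncoding and skipped by
// kPointerSize; a larger hole is tagged kMultiFreeEncoding with its byte
// length stored one int further on, so a 64-byte hole advances 'current' by
// Memory::int_at(current + kIntSize) == 64.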


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor;
  Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  GlobalHandles::IterateWeakRoots(&updating_visitor);

  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &UpdatePointersInNewObject);

  // Large objects do not move, the map word can be updated directly.
  LargeObjectIterator it(Heap::lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
    UpdatePointersInNewObject(obj);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep old map pointers.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, so it cannot
  // be cast to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page, NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
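
// Worked example of the page-crossing arithmetic (hypothetical offsets, for
// illustration only): suppose first_forwarded is at offset 0x100 in its page,
// the allocation watermark is at offset 0x900, and the object's encoded
// offset is 0xA00. Since 0x100 + 0xA00 >= 0x900, the target is on the next
// page: offset becomes 0xA00 - (0x900 - 0x100) = 0x200, counted from
// Page::kObjectStartOffset of next_page.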


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects, always relocating map objects first. Relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &RelocateCellObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces.
  Heap::new_space()->Flip();

  Heap::new_space()->MCCommitRelocationInfo();

  // Set age_mark to bottom in to space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  Heap::CheckNewSpaceExpansionCriteria();
  Heap::IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer. The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    PROFILE(FunctionDeleteEvent(obj->address()));
  }
#endif
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal