// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    UpdatePointers();

    RelocateObjects();
  } else {
    SweepSpaces();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
    compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (ie, we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  int old_gen_recoverable = 0;
  int old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

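  // Illustrative example (numbers assumed, not from the source): with
  // old_gen_used = 100 MB and old_gen_recoverable = 20 MB, the fragmentation
  // computed below is 20%, which exceeds both kFragmentationLimit (15%) and
  // kFragmentationAllowed (1 MB), so the next GC will compact.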
  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

static MarkingStack marking_stack;


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}

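// Static visitor used during the marking phase. It dispatches on an object's
// visitor id (derived from its map) to a type-specific marking function
// registered in table_ below.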
class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

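  // Code flushing is toggled by swapping the JSFunction visitor: with
  // flushing enabled, VisitJSFunctionAndFlushCode may replace a cold
  // function's compiled code with the lazy-compile stub.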
  static void EnableCodeFlushing(bool enabled) {
    if (enabled) {
      table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
    } else {
      table_.Register(kVisitJSFunction, &VisitJSFunction);
    }
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);


    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitSharedFunctionInfo,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SharedFunctionInfo::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Object** p)) {
    MarkObjectByPointer(p);
  }

  INLINE(static void VisitPointers(Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

  static inline void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
    } else {
      MarkCompactCollector::MarkObject(code);
    }
  }

  static inline void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkCompactCollector::MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    MarkCompactCollector::MarkObject(object);
  }

  // Visit an unmarked object.
  static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(Heap::Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector::SetMark(obj);
    // Mark the map pointer and the body.
    MarkCompactCollector::MarkObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check;
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
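  // Data objects (byte arrays and sequential strings) contain no tagged
  // pointers, so marking visits them with no-ops.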
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
  }

  // Code flushing support.
  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  inline static bool HasSourceCode(SharedFunctionInfo* info) {
    Object* undefined = Heap::raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return
        function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
  }


  static void FlushCodeForFunction(JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    if (shared_info->IsMarked()) return;

    // Special handling if the function and shared info objects
    // have different code objects.
    if (function->unchecked_code() != shared_info->unchecked_code()) {
      // If the shared function has been flushed but the function has not,
      // we flush the function if possible.
      if (!IsCompiled(shared_info) &&
          IsCompiled(function) &&
          !function->unchecked_code()->IsMarked()) {
        function->set_code(shared_info->unchecked_code());
      }
      return;
    }

    // Code is either on stack or in compilation cache.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) return;

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return;
    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return;
    }

    // Compute the lazy compilable version of the code.
    Code* code = Builtins::builtin(Builtins::LazyCompile);
    shared_info->set_code(code);
    function->set_code(code);
  }

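  // Recover an object's map even when its map word currently carries the
  // mark or overflow bit; the bits are cleared on a copy of the map word.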
  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }


  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }

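  // A context is acceptable for code flushing if its map is one of the three
  // context maps and its global object is not the builtins object.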
  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    if (!(map == Heap::raw_unchecked_context_map() ||
          map == Heap::raw_unchecked_catch_context_map() ||
          map == Heap::raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }

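  // Marks the Code object referenced through a JSFunction's code entry slot
  // (which holds the address of the code's entry point, not a tagged
  // pointer) and rewrites the slot if visiting changed the pointer.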
  static void VisitCodeEntry(Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(&code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }


  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      FlushCodeForFunction(jsfunction);
    }
    VisitJSFunction(map, object);
  }

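  // Visits a JSFunction's tagged fields in two ranges, skipping the code
  // entry slot in between; that slot is an Address and is handled by
  // VisitCodeEntry instead.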
  static void VisitJSFunction(Map* map, HeapObject* object) {
#define SLOT_ADDR(obj, offset) \
    reinterpret_cast<Object**>((obj)->address() + offset)

    VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);

    VisitPointers(SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kSize));
#undef SLOT_ADDR
  }


  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(start, end);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitCodeTarget(rinfo);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitDebugTarget(rinfo);
  }
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  void VisitThread(ThreadLocalTop* top) {
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
    }
  }
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsHeapObject()) {
      MarkCompactCollector::MarkObject(HeapObject::cast(obj));
    }
  }
};


void MarkCompactCollector::PrepareForCodeFlushing() {
  if (!FLAG_flush_code) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (Debug::IsLoaded() || Debug::has_break_points()) {
    StaticMarkingVisitor::EnableCodeFlushing(false);
    return;
  }
#endif
  StaticMarkingVisitor::EnableCodeFlushing(true);

  // Make sure we are not referencing the code from the stack.
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor;
  ThreadManager::IterateArchivedThreads(&code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor;
  CompilationCache::IterateFunctions(&visitor);

  MarkCompactCollector::ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    MarkCompactCollector::SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkCompactCollector::MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    MarkCompactCollector::EmptyMarkingStack();
  }
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          Heap::FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = Heap::raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(Heap::Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache();
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say that
  // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
  // NULL_DESCRIPTOR, we don't mark the value as live. Only for
  // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
  // Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack.Push(descriptors);
}


void MarkCompactCollector::CreateBackPointers() {
  HeapObjectIterator iterator(Heap::map_space());
  for (HeapObject* next_object = iterator.next();
       next_object != NULL; next_object = iterator.next()) {
    if (next_object->IsMap()) {  // Could also be ByteArray on free list.
      Map* map = Map::cast(next_object);
      if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
          map->instance_type() <= JS_FUNCTION_TYPE) {
        map->CreateBackPointers();
      } else {
        ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
      }
    }
  }
}


static int OverflowObjectSize(HeapObject* obj) {
  // Recover the normal map pointer; it might be marked as live and
  // overflowed.
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  map_word.ClearOverflow();
  return obj->SizeFromMap(map_word.ToMap());
}


// Fill the marking stack with overflowed objects returned by the given
// iterator. Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void ScanOverflowedObjects(T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_stack.is_full());

  for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
    if (object->IsOverflowed()) {
      object->ClearOverflow();
      ASSERT(object->IsMarked());
      ASSERT(Heap::Contains(object));
      marking_stack.Push(object);
      if (marking_stack.is_full()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
}


void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker;
  symbol_table->IteratePrefix(&marker);
  ProcessMarkingStack();
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap. Visit them now.
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::MarkObjectGroups() {
  List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();

  for (int i = 0; i < object_groups->length(); i++) {
    ObjectGroup* entry = object_groups->at(i);
    if (entry == NULL) continue;

    List<Object**>& objects = entry->objects_;
    bool group_marked = false;
    for (int j = 0; j < objects.length(); j++) {
      Object* object = *objects[j];
      if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
        group_marked = true;
        break;
      }
    }

    if (!group_marked) continue;

    // An object in the group is marked, so mark as gray all white heap
    // objects in the group.
    for (int j = 0; j < objects.length(); ++j) {
      if ((*objects[j])->IsHeapObject()) {
        MarkObject(HeapObject::cast(*objects[j]));
      }
    }
    // Once the entire group has been colored gray, set the object group
    // to NULL so it won't be processed again.
    delete object_groups->at(i);
    object_groups->at(i) = NULL;
  }
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
  while (!marking_stack.is_empty()) {
    HeapObject* object = marking_stack.Pop();
    ASSERT(object->IsHeapObject());
    ASSERT(Heap::Contains(object));
    ASSERT(object->IsMarked());
    ASSERT(!object->IsOverflowed());

    // Because the object is marked, we have to recover the original map
    // pointer and use it to mark the object's body.
    MapWord map_word = object->map_word();
    map_word.ClearMark();
    Map* map = map_word.ToMap();
    MarkObject(map);

    StaticMarkingVisitor::IterateBody(map, object);
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingStack() {
  ASSERT(marking_stack.overflowed());

  SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&new_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
                                    &OverflowObjectSize);
  ScanOverflowedObjects(&old_pointer_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&old_data_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&code_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&map_it);
  if (marking_stack.is_full()) return;

  HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&cell_it);
  if (marking_stack.is_full()) return;

  LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
  ScanOverflowedObjects(&lo_it);
  if (marking_stack.is_full()) return;

  marking_stack.clear_overflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack. Before: the marking stack contains zero or more heap object
// pointers. After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingStack() {
  EmptyMarkingStack();
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}


void MarkCompactCollector::ProcessObjectGroups() {
  bool work_to_do = true;
  ASSERT(marking_stack.is_empty());
  while (work_to_do) {
    MarkObjectGroups();
    work_to_do = !marking_stack.is_empty();
    ProcessMarkingStack();
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
                           Heap::new_space()->FromSpaceHigh());

  ASSERT(!marking_stack.overflowed());

  PrepareForCodeFlushing();

  RootMarkingVisitor root_visitor;
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked. Mark objects reachable from object groups
  // containing at least one marked object, and continue until no new
  // objects are reachable from the object groups.
  ProcessObjectGroups();

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked. Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  GlobalHandles::IterateWeakRoots(&root_visitor);
  while (marking_stack.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }

  // Repeat the object groups to mark unmarked groups reachable from the
  // weak roots.
  ProcessObjectGroups();

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table. Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
  SymbolTableCleaner v;
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  ExternalStringTable::Iterate(&v);
  ExternalStringTable::CleanUp();

  // Remove object groups after marking phase.
  GlobalHandles::RemoveObjectGroups();
}

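// Size callback that is safe to use while mark bits are set: it clears the
// mark bit from a copy of the map word before computing the object's size.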
static int CountMarkedCallback(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


#ifdef DEBUG
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  live_bytes_ += obj->Size();
  if (Heap::new_space()->Contains(obj)) {
    live_young_objects_size_ += obj->Size();
  } else if (Heap::map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += obj->Size();
  } else if (Heap::cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += obj->Size();
  } else if (Heap::old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += obj->Size();
  } else if (Heap::old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += obj->Size();
  } else if (Heap::code_space()->Contains(obj)) {
    live_code_objects_size_ += obj->Size();
  } else if (Heap::lo_space()->Contains(obj)) {
    live_lo_objects_size_ += obj->Size();
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG


void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  Heap::lo_space()->FreeUnmarkedObjects();
}


// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
  MapWord metamap = object->map_word();
  metamap.ClearMark();
  return metamap.ToMap()->instance_type() == MAP_TYPE;
}


void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once. This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
    if (map->instance_type() > JS_FUNCTION_TYPE) continue;
    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}

// -------------------------------------------------------------------------
// Phase 2: Encode forwarding addresses.
// When compacting, forwarding addresses for objects in old space and map
// space are encoded in their map pointer word (along with an encoding of
// their map pointers).
//
// The exact encoding is described in the comments for class MapWord in
// objects.h.
//
// An address range [start, end) can have both live and non-live objects.
// Maximal non-live regions are marked so they can be skipped on subsequent
// sweeps of the heap. A distinguished map-pointer encoding is used to mark
// free regions of one-word size (in which case the next word is the start
// of a live object). A second distinguished map-pointer encoding is used
// to mark free regions larger than one word, and the size of the free
// region (including the first word) is written to the second word of the
// region.
//
// Any valid map page offset must lie in the object area of the page, so map
// page offsets less than Page::kObjectStartOffset are invalid. We use a
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
static const uint32_t kSingleFreeEncoding = 0;
static const uint32_t kMultiFreeEncoding = 1;


// Encode a free region, defined by the given start address and size, in the
// first word or two of the region.
void EncodeFreeRegion(Address free_start, int free_size) {
  ASSERT(free_size >= kIntSize);
  if (free_size == kIntSize) {
    Memory::uint32_at(free_start) = kSingleFreeEncoding;
  } else {
    ASSERT(free_size >= 2 * kIntSize);
    Memory::uint32_at(free_start) = kMultiFreeEncoding;
    Memory::int_at(free_start + kIntSize) = free_size;
  }

#ifdef DEBUG
  // Zap the body of the free region.
  if (FLAG_enable_slow_asserts) {
    for (int offset = 2 * kIntSize;
         offset < free_size;
         offset += kPointerSize) {
      Memory::Address_at(free_start + offset) = kZapValue;
    }
  }
#endif
}

// Try to promote all objects in new space: heap numbers and sequential
// strings (pointer-free objects) are promoted to the old data space and all
// others to the old pointer space; objects too large for a paged space are
// kept in new space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
  Object* forwarded;
  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    forwarded = Failure::Exception();
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);
    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    forwarded = target_space->MCAllocateRaw(object_size);
  }
  if (forwarded->IsFailure()) {
    forwarded = Heap::new_space()->MCAllocateRaw(object_size);
  }
  return forwarded;
}


// Allocation functions for the paged spaces call the space's MCAllocateRaw.
inline Object* MCAllocateFromOldPointerSpace(HeapObject* ignore,
                                             int object_size) {
  return Heap::old_pointer_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromOldDataSpace(HeapObject* ignore, int object_size) {
  return Heap::old_data_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCodeSpace(HeapObject* ignore, int object_size) {
  return Heap::code_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromMapSpace(HeapObject* ignore, int object_size) {
  return Heap::map_space()->MCAllocateRaw(object_size);
}


inline Object* MCAllocateFromCellSpace(HeapObject* ignore, int object_size) {
  return Heap::cell_space()->MCAllocateRaw(object_size);
}


// The forwarding address is encoded at the same offset as the current
// to-space object, but in from space.
inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
                                              int object_size,
                                              Object* new_object,
                                              int* ignored) {
  int offset =
      Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
  Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
      HeapObject::cast(new_object)->address();
}


// The forwarding address is encoded in the map pointer of the object as an
// offset (in terms of live bytes) from the address of the first live object
// in the page.
inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
                                                int object_size,
                                                Object* new_object,
                                                int* offset) {
  // Record the forwarding address of the first live object if necessary.
  if (*offset == 0) {
    Page::FromAddress(old_object->address())->mc_first_forwarded =
        HeapObject::cast(new_object)->address();
  }

  MapWord encoding =
      MapWord::EncodeAddress(old_object->map()->address(), *offset);
  old_object->set_map_word(encoding);
  *offset += object_size;
  ASSERT(*offset <= Page::kObjectAreaSize);
}


// Most non-live objects are ignored.
inline void IgnoreNonLiveObject(HeapObject* object) {}


// Function template that, given a range of addresses (eg, a semispace or a
// paged space page), iterates through the objects in the range to clear
// mark bits and compute and encode forwarding addresses. As a side effect,
// maximal free chunks are marked so that they can be skipped on subsequent
// sweeps.
//
// The template parameters are an allocation function, a forwarding address
// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded = Alloc(object, object_size);
      // Allocation cannot fail, because we are compacting the space.
      ASSERT(!forwarded->IsFailure());
      Encode(object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object);
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}


// Functions to encode the forwarding pointers in each compactable space.
void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
  int ignored;
  EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
                                   EncodeForwardingAddressInNewSpace,
                                   IgnoreNonLiveObject>(
      Heap::new_space()->bottom(),
      Heap::new_space()->top(),
      &ignored);
}


template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
    PagedSpace* space) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();

    // The offset of each live object in the page from the first live object
    // in the page.
    int offset = 0;
    EncodeForwardingAddressesInRange<Alloc,
                                     EncodeForwardingAddressInPagedSpace,
                                     ProcessNonLive>(
        p->ObjectAreaStart(),
        p->AllocationTop(),
        &offset);
  }
}

// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all live objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly
// into the first word of the object without any encoding. If the object is
// dead we write NULL as the forwarding address.
// The second pass updates pointers to new space in all spaces. It is
// possible to encounter pointers to dead objects during traversal of dirty
// regions; we clear them to avoid encountering them again during the next
// iteration over dirty regions.
1411static void MigrateObject(Address dst,
1412 Address src,
1413 int size,
1414 bool to_old_space) {
1415 if (to_old_space) {
1416 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1417 } else {
1418 Heap::CopyBlock(dst, src, size);
1419 }
Steve Block6ded16b2010-05-10 14:33:55 +01001420
1421 Memory::Address_at(src) = dst;
1422}
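

// An illustrative sketch, not used by the collector: given the scheme above,
// recovering a forwarding address is a single word load, and a dead object is
// recognizable by a NULL first word. The helper name is hypothetical and is
// introduced here only to make the encoding explicit.
static inline Address NewSpaceForwardingAddress(HeapObject* object) {
  // MigrateObject stored the destination address verbatim in the first word
  // of the source object; SweepNewSpace writes NULL there for dead objects.
  return Memory::Address_at(object->address());
}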


class StaticPointersToNewGenUpdatingVisitor : public
    StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
 public:
  static inline void VisitPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();

    if (Heap::new_space()->Contains(obj)) {
      ASSERT(Heap::InFromSpace(*p));
      *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
    }
  }
};


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }
};


// Updates pointers from live objects in old spaces to new space. Unlike the
// visitor above, it can encounter pointers to dead objects in new space when
// traversing map space (see the comment for MigrateObject).
static void UpdatePointerToNewGen(HeapObject** p) {
  if (!(*p)->IsHeapObject()) return;

  Address old_addr = (*p)->address();
  ASSERT(Heap::InFromSpace(*p));

  Address new_addr = Memory::Address_at(old_addr);

  if (new_addr == NULL) {
    // We encountered a pointer to a dead object. Clear it so we will
    // not visit it again during the next dirty regions iteration.
    *p = NULL;
  } else {
    *p = HeapObject::FromAddress(new_addr);
  }
}


static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object** p) {
  Address old_addr = HeapObject::cast(*p)->address();
  Address new_addr = Memory::Address_at(old_addr);
  return String::cast(HeapObject::FromAddress(new_addr));
}


static bool TryPromoteObject(HeapObject* object, int object_size) {
  Object* result;

  if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
    result = Heap::lo_space()->AllocateRawFixedArray(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(), object->address(), object_size, true);
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = Heap::TargetSpace(object);

    ASSERT(target_space == Heap::old_pointer_space() ||
           target_space == Heap::old_data_space());
    result = target_space->AllocateRaw(object_size);
    if (!result->IsFailure()) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space == Heap::old_pointer_space());
      MarkCompactCollector::tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}


static void SweepNewSpace(NewSpace* space) {
  Heap::CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in the inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      MarkCompactCollector::tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate the object to the other semispace.
      Object* target = space->AllocateRaw(size);

      // Allocation cannot fail at this point: semispaces are of equal size.
      ASSERT(!target->IsFailure());

      MigrateObject(HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      size = object->Size();
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor;

  // Update pointers in to space.
  Address current = space->bottom();
  while (current < space->top()) {
    HeapObject* object = HeapObject::FromAddress(current);
    current +=
        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
                                                           object);
  }

  // Update roots.
  Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);

  // Update pointers in old spaces.
  Heap::IterateDirtyRegions(Heap::old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(Heap::cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointers from the external string table.
  Heap::UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  Heap::IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());
}


static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
  PageIterator it(space, PageIterator::PAGES_IN_USE);

  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them as whole pages (instead of
  // putting them on the free list). See the illustrative walkthrough after
  // this function.

  // Page preceding current.
  Page* prev = Page::FromAddress(NULL);

  // First empty page in a sequence.
  Page* first_empty_page = Page::FromAddress(NULL);

  // Page preceding first empty page.
  Page* prec_first_empty_page = Page::FromAddress(NULL);

  // If the last used page of the space ends with a sequence of dead objects,
  // we can adjust the allocation top instead of putting this free area onto
  // the free list. Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects, we have
  // to put such an area onto the free list.
  Address last_free_start = NULL;
  int last_free_size = 0;

  while (it.has_next()) {
    Page* p = it.next();

    bool is_previous_alive = true;
    Address free_start = NULL;
    HeapObject* object;

    for (Address current = p->ObjectAreaStart();
         current < p->AllocationTop();
         current += object->Size()) {
      object = HeapObject::FromAddress(current);
      if (object->IsMarked()) {
        object->ClearMark();
        MarkCompactCollector::tracer()->decrement_marked_count();

        if (!is_previous_alive) {  // Transition from free to live.
          dealloc(free_start,
                  static_cast<int>(current - free_start),
                  true,
                  false);
          is_previous_alive = true;
        }
      } else {
        MarkCompactCollector::ReportDeleteIfNeeded(object);
        if (is_previous_alive) {  // Transition from live to free.
          free_start = current;
          is_previous_alive = false;
        }
      }
      // The object is now unmarked for the call to Size() at the top of the
      // loop.
    }

    bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
        || (!is_previous_alive && free_start == p->ObjectAreaStart());

    if (page_is_empty) {
      // This page is empty. Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
      if (!first_empty_page->is_valid()) {
        first_empty_page = p;
        prec_first_empty_page = prev;
      }

      if (!is_previous_alive) {
        // There are dead objects on this page. Update space accounting stats
        // without putting anything into the free list.
        int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
        if (size_in_bytes > 0) {
          dealloc(free_start, size_in_bytes, false, true);
        }
      }
    } else {
      // This page is not empty. The sequence of empty pages ended on the
      // previous one.
      if (first_empty_page->is_valid()) {
        space->FreePages(prec_first_empty_page, prev);
        prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
      }

      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it onto the free list.
      if (last_free_size > 0) {
        Page::FromAddress(last_free_start)->
            SetAllocationWatermark(last_free_start);
        dealloc(last_free_start, last_free_size, true, true);
        last_free_start = NULL;
        last_free_size = 0;
      }

      // If the last region of this page was not live we remember it.
      if (!is_previous_alive) {
        ASSERT(last_free_size == 0);
        last_free_size = static_cast<int>(p->AllocationTop() - free_start);
        last_free_start = free_start;
      }
    }

    prev = p;
  }

  // We reached the end of the space. See if we need to adjust the
  // allocation top.
  Address new_allocation_top = NULL;

  if (first_empty_page->is_valid()) {
    // The last used pages in the space are empty. We can move the allocation
    // top backwards to the beginning of the first empty page.
    ASSERT(prev == space->AllocationTopPage());

    new_allocation_top = first_empty_page->ObjectAreaStart();
  }

  if (last_free_size > 0) {
    // There was a free ending area on the previous page.
    // Deallocate it without putting it into the free list and move the
    // allocation top to the beginning of this free area.
    dealloc(last_free_start, last_free_size, false, true);
    new_allocation_top = last_free_start;
  }

  if (new_allocation_top != NULL) {
#ifdef DEBUG
    Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
    if (!first_empty_page->is_valid()) {
      ASSERT(new_allocation_top_page == space->AllocationTopPage());
    } else if (last_free_size > 0) {
      ASSERT(new_allocation_top_page == prec_first_empty_page);
    } else {
      ASSERT(new_allocation_top_page == first_empty_page);
    }
#endif

    space->SetTop(new_allocation_top);
  }
}
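

// An illustrative walkthrough of the bookkeeping above, assuming four pages
// [P1 live, P2 empty, P3 empty, P4 live objects followed by a dead tail]:
//  - Sweeping P2 and P3 starts an empty-page sequence: first_empty_page = P2,
//    prec_first_empty_page = P1.
//  - P4 is not empty, so the sequence ends and FreePages(P1, P3) releases
//    P2..P3 as whole pages, without touching the free list.
//  - The dead tail of P4 is remembered as (last_free_start, last_free_size).
//    Since no later page holds live objects, it is deallocated without
//    entering the free list and the allocation top moves back to
//    last_free_start.
// If instead a later page had contained live objects, the deferred tail
// would have been pushed onto the free list via the dealloc(..., true, true)
// call inside the loop above.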


void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
                                                     int size_in_bytes,
                                                     bool add_to_freelist,
                                                     bool last_on_page) {
  Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateOldDataBlock(Address start,
                                                  int size_in_bytes,
                                                  bool add_to_freelist,
                                                  bool last_on_page) {
  Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateCodeBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}


void MarkCompactCollector::DeallocateMapBlock(Address start,
                                              int size_in_bytes,
                                              bool add_to_freelist,
                                              bool last_on_page) {
  // Objects in map space are assumed to have size Map::kSize and a
  // valid map in their first word. Thus, we break the free block up into
  // chunks and free them separately.
  ASSERT(size_in_bytes % Map::kSize == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += Map::kSize) {
    Heap::map_space()->Free(a, add_to_freelist);
  }
}


void MarkCompactCollector::DeallocateCellBlock(Address start,
                                               int size_in_bytes,
                                               bool add_to_freelist,
                                               bool last_on_page) {
  // Free-list elements in cell space are assumed to have a fixed size.
  // We break the free block into chunks and add them to the free list
  // individually.
  int size = Heap::cell_space()->object_size_in_bytes();
  ASSERT(size_in_bytes % size == 0);
  Address end = start + size_in_bytes;
  for (Address a = start; a < end; a += size) {
    Heap::cell_space()->Free(a, add_to_freelist);
  }
}


void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted). Set the
  // relocation info to the beginning of the inactive semispace.
  Heap::new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      Heap::old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      Heap::old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      Heap::code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      Heap::cell_space());

  // Compute new space next to last, after the old and code spaces have been
  // compacted. Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects. Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      Heap::map_space());

  // Write relocation info to the top page, so we can use it later. This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
  Heap::old_data_space()->MCWriteRelocationInfoToPage();
  Heap::code_space()->MCWriteRelocationInfoToPage();
  Heap::map_space()->MCWriteRelocationInfoToPage();
  Heap::cell_space()->MCWriteRelocationInfoToPage();
}


class MapIterator : public HeapObjectIterator {
 public:
  MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }

  explicit MapIterator(Address start)
      : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }

 private:
  static int SizeCallback(HeapObject* unused) {
    USE(unused);
    return Map::kSize;
  }
};


class MapCompact {
 public:
  explicit MapCompact(int live_maps)
      : live_maps_(live_maps),
        to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
        map_to_evacuate_it_(to_evacuate_start_),
        first_map_to_evacuate_(
            reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }

  void UpdateMapPointersInRoots() {
    Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
    GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
  }

  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != Heap::map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
    }
  }

  void UpdateMapPointersInNewSpace() {
    NewSpace* space = Heap::new_space();
    UpdateMapPointersInRange(space->bottom(), space->top());
  }

  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(Heap::lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(obj);
  }

  void Finish() {
    Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

 private:
  int live_maps_;
  Address to_evacuate_start_;
  MapIterator vacant_map_it_;
  MapIterator map_to_evacuate_it_;
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with an overflowed map word. They are the only
      // objects whose map word is overflowed, as marking is already complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  static MapUpdatingVisitor map_updating_visitor_;

  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
                                                  map_to_evacuate->address(),
                                                  Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to the copy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }

  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  static int UpdateMapPointersInObject(HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(Heap::map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(Heap::map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n", obj->address(),
               map, new_map);
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
    return size;
  }

  static void UpdateMapPointersInRange(Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};

MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;

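// An illustrative sketch, not used by the collector: map compaction forwards
// a moved map by storing the new map's address in the old map's map word and
// setting the overflow bit (legal here only because marking is complete, so
// no other object has an overflowed map word). Decoding mirrors
// MapCompact::GetForwardedMap; the helper name is hypothetical.
static inline Map* ForwardedMapIfMoved(Map* map) {
  MapWord word = map->map_word();
  if (!word.IsOverflowed()) return map;  // The map was not relocated.
  word.ClearOverflow();
  return word.ToMap();
}

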
void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces). We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
  SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
  SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
  SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(Heap::new_space());
  }
  SweepSpace(Heap::map_space(), &DeallocateMapBlock);

  Heap::IterateDirtyRegions(Heap::map_space(),
                            &Heap::IteratePointersInDirtyMapsRegion,
                            &UpdatePointerToNewGen,
                            Heap::WATERMARK_SHOULD_BE_VALID);

  int live_maps_size = Heap::map_space()->Size();
  int live_maps = live_maps_size / Map::kSize;
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (Heap::map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == Heap::map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace), skipping the free regions recorded during sweeping via their
// encoded map words. The callback function is used to get the size of each
// live object.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    HeapObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = size_func(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
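

// A worked example of the free-region encoding consumed above, assuming a
// 32-bit heap word (kPointerSize == kIntSize == 4): a one-word hole is
// encoded as kSingleFreeEncoding in its first word, so the scan advances by
// kPointerSize; a 24-byte hole is encoded as kMultiFreeEncoding in its first
// word with the byte size 24 stored in the following word, so the scan
// advances by Memory::int_at(current + kIntSize) == 24. Anything else is a
// live object's map word, and size_func supplies the step.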


int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
                                             HeapObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!Heap::InFromSpace(obj));

    if (Heap::new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          Heap::new_space()->FromSpaceLow() +
          Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
             Heap::old_data_space()->Contains(new_addr) ||
             Heap::new_space()->FromSpaceContains(new_addr) ||
             Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (Heap::new_space()->FromSpaceContains(new_addr)) {
        ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
               Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (Heap::lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }
};


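// An illustrative sketch, not used by the collector: during a compacting
// collection the forwarding address of a new-space object is not stored in
// the object itself (compare MigrateObject above). It lives in a from-space
// word at the same offset the object has in to space, which is what
// UpdatingVisitor::UpdatePointer computes. The helper name is hypothetical.
static inline Address McNewSpaceForwardingSlot(Address old_addr) {
  // Slot holding the forwarding address for the to-space object at old_addr.
  return Heap::new_space()->FromSpaceLow() +
         Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
}

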
2271void MarkCompactCollector::UpdatePointers() {
2272#ifdef DEBUG
2273 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2274 state_ = UPDATE_POINTERS;
2275#endif
2276 UpdatingVisitor updating_visitor;
Steve Blockd0582a62009-12-15 09:54:21 +00002277 Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00002278 GlobalHandles::IterateWeakRoots(&updating_visitor);
2279
Steve Block6ded16b2010-05-10 14:33:55 +01002280 int live_maps_size = IterateLiveObjects(Heap::map_space(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002281 &UpdatePointersInOldObject);
Steve Block6ded16b2010-05-10 14:33:55 +01002282 int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2283 &UpdatePointersInOldObject);
2284 int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2285 &UpdatePointersInOldObject);
2286 int live_codes_size = IterateLiveObjects(Heap::code_space(),
2287 &UpdatePointersInOldObject);
2288 int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2289 &UpdatePointersInOldObject);
2290 int live_news_size = IterateLiveObjects(Heap::new_space(),
2291 &UpdatePointersInNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002292
2293 // Large objects do not move, the map word can be updated directly.
2294 LargeObjectIterator it(Heap::lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00002295 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2296 UpdatePointersInNewObject(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00002297
Steve Block6ded16b2010-05-10 14:33:55 +01002298 USE(live_maps_size);
2299 USE(live_pointer_olds_size);
2300 USE(live_data_olds_size);
2301 USE(live_codes_size);
2302 USE(live_cells_size);
2303 USE(live_news_size);
2304 ASSERT(live_maps_size == live_map_objects_size_);
2305 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2306 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2307 ASSERT(live_codes_size == live_code_objects_size_);
2308 ASSERT(live_cells_size == live_cell_objects_size_);
2309 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002310}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep the old map pointer; map objects are not relocated yet.
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(Heap::map_space()->Contains(old_map));
  ASSERT(Heap::map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point the first word of map_addr is also encoded, so we cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor;
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find the end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page. NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
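

// A worked example of the computation above, with illustrative numbers: say
// the object's encoded offset is 9000 bytes, the first live object on its
// page forwards into forwarded_page at forwarded_offset == 2000, and
// allocation on forwarded_page stops at mc_top_offset == 8000. Then
// 2000 + 9000 >= 8000, so the forwarding target spills onto the next page:
// the leftover is 9000 - (8000 - 2000) == 3000 bytes past that page's
// Page::kObjectStartOffset. With a small offset instead (say 1000), the
// target would simply be first_forwarded + 1000 on the same page.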


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate map objects first: relocating objects in the other spaces
  // relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(Heap::map_space(),
                                          &RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
                                                  &RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
                                               &RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(Heap::code_space(),
                                           &RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(Heap::cell_space(),
                                           &RelocateCellObject);
  int live_news_size = IterateLiveObjects(Heap::new_space(),
                                          &RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces.
  Heap::new_space()->Flip();

  Heap::new_space()->MCCommitRelocationInfo();

  // Set age_mark to bottom in to space.
  Address mark = Heap::new_space()->bottom();
  Heap::new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  Heap::CheckNewSpaceExpansionCriteria();
  Heap::IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer. The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsJSFunction()) {
    PROFILE(FunctionMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  } else if (obj->IsJSFunction()) {
    PROFILE(FunctionDeleteEvent(obj->address()));
  }
#endif
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal