// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

bool MarkCompactCollector::force_compaction_ = false;
bool MarkCompactCollector::compacting_collection_ = false;
bool MarkCompactCollector::compact_on_next_gc_ = false;

int MarkCompactCollector::previous_marked_count_ = 0;
GCTracer* MarkCompactCollector::tracer_ = NULL;


#ifdef DEBUG
MarkCompactCollector::CollectorState MarkCompactCollector::state_ = IDLE;

// Counters used for debugging the marking phase of mark-compact or mark-sweep
// collection.
int MarkCompactCollector::live_bytes_ = 0;
int MarkCompactCollector::live_young_objects_size_ = 0;
int MarkCompactCollector::live_old_data_objects_size_ = 0;
int MarkCompactCollector::live_old_pointer_objects_size_ = 0;
int MarkCompactCollector::live_code_objects_size_ = 0;
int MarkCompactCollector::live_map_objects_size_ = 0;
int MarkCompactCollector::live_cell_objects_size_ = 0;
int MarkCompactCollector::live_lo_objects_size_ = 0;
#endif


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    Heap::MarkMapPointersAsEncoded(true);
    UpdatePointers();
    Heap::MarkMapPointersAsEncoded(false);
    PcToCodeCache::FlushPcToCodeCache();

    RelocateObjects();
  } else {
    SweepSpaces();
    PcToCodeCache::FlushPcToCodeCache();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}
109
110
void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!Heap::map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If the GDBJIT interface is active, disable compaction.
    compacting_collection_ = false;
  }
#endif

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}
154
155
void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  StubCache::Clear();

  ExternalStringTable::CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;  // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  intptr_t old_gen_recoverable = 0;
  intptr_t old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}
194
195
// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

static MarkingStack marking_stack;

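// Helper class for tracking code objects that are candidates for flushing.
// Candidates collected while marking are processed once marking is complete:
// a candidate whose code was not marked has its code replaced by the
// lazy-compile builtin, so the function is recompiled if it is called again.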
Ben Murdochb0fe1622011-05-05 13:52:32 +0100226class FlushCode : public AllStatic {
227 public:
228 static void AddCandidate(SharedFunctionInfo* shared_info) {
229 SetNextCandidate(shared_info, shared_function_info_candidates_head_);
230 shared_function_info_candidates_head_ = shared_info;
231 }
232
233
234 static void AddCandidate(JSFunction* function) {
235 ASSERT(function->unchecked_code() ==
236 function->unchecked_shared()->unchecked_code());
237
238 SetNextCandidate(function, jsfunction_candidates_head_);
239 jsfunction_candidates_head_ = function;
240 }
241
242
243 static void ProcessCandidates() {
244 ProcessSharedFunctionInfoCandidates();
245 ProcessJSFunctionCandidates();
246 }
247
248 private:
249 static void ProcessJSFunctionCandidates() {
250 Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
251
252 JSFunction* candidate = jsfunction_candidates_head_;
253 JSFunction* next_candidate;
254 while (candidate != NULL) {
255 next_candidate = GetNextCandidate(candidate);
256
257 SharedFunctionInfo* shared = candidate->unchecked_shared();
258
259 Code* code = shared->unchecked_code();
260 if (!code->IsMarked()) {
261 shared->set_code(lazy_compile);
262 candidate->set_code(lazy_compile);
263 } else {
264 candidate->set_code(shared->unchecked_code());
265 }
266
267 candidate = next_candidate;
268 }
269
270 jsfunction_candidates_head_ = NULL;
271 }
272
273
274 static void ProcessSharedFunctionInfoCandidates() {
275 Code* lazy_compile = Builtins::builtin(Builtins::LazyCompile);
276
277 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
278 SharedFunctionInfo* next_candidate;
279 while (candidate != NULL) {
280 next_candidate = GetNextCandidate(candidate);
281 SetNextCandidate(candidate, NULL);
282
283 Code* code = candidate->unchecked_code();
284 if (!code->IsMarked()) {
285 candidate->set_code(lazy_compile);
286 }
287
288 candidate = next_candidate;
289 }
290
291 shared_function_info_candidates_head_ = NULL;
292 }
293
294
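  // The candidate lists are threaded through the candidates themselves:
  // for a JSFunction the next-candidate pointer temporarily reuses the code
  // entry slot, and for a SharedFunctionInfo it lives in the header padding
  // of its code object (the STATIC_ASSERT below checks that a pointer fits
  // there).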
295 static JSFunction** GetNextCandidateField(JSFunction* candidate) {
296 return reinterpret_cast<JSFunction**>(
297 candidate->address() + JSFunction::kCodeEntryOffset);
298 }
299
300
301 static JSFunction* GetNextCandidate(JSFunction* candidate) {
302 return *GetNextCandidateField(candidate);
303 }
304
305
306 static void SetNextCandidate(JSFunction* candidate,
307 JSFunction* next_candidate) {
308 *GetNextCandidateField(candidate) = next_candidate;
309 }
310
311
312 STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);
313
314
315 static SharedFunctionInfo** GetNextCandidateField(
316 SharedFunctionInfo* candidate) {
317 Code* code = candidate->unchecked_code();
318 return reinterpret_cast<SharedFunctionInfo**>(
319 code->address() + Code::kHeaderPaddingStart);
320 }
321
322
323 static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
324 return *GetNextCandidateField(candidate);
325 }
326
327
328 static void SetNextCandidate(SharedFunctionInfo* candidate,
329 SharedFunctionInfo* next_candidate) {
330 *GetNextCandidateField(candidate) = next_candidate;
331 }
332
333 static JSFunction* jsfunction_candidates_head_;
334
335 static SharedFunctionInfo* shared_function_info_candidates_head_;
336};
337
338JSFunction* FlushCode::jsfunction_candidates_head_ = NULL;
339
340SharedFunctionInfo* FlushCode::shared_function_info_candidates_head_ = NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +0000341
static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is Heap::empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == Heap::empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  if (second != Heap::raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
375
376
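// Static visitor used to mark object bodies. Visits are dispatched through
// table_, which maps each visitor id to a specialized body visitor; code
// flushing is turned on or off by registering different JSFunction and
// SharedFunctionInfo visitors (see EnableCodeFlushing).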
Iain Merrick75681382010-08-19 15:07:18 +0100377class StaticMarkingVisitor : public StaticVisitorBase {
Steve Blocka7e24c12009-10-30 11:49:00 +0000378 public:
Iain Merrick75681382010-08-19 15:07:18 +0100379 static inline void IterateBody(Map* map, HeapObject* obj) {
380 table_.GetVisitor(map)(map, obj);
381 }
382
383 static void EnableCodeFlushing(bool enabled) {
384 if (enabled) {
Steve Block791712a2010-08-27 10:21:07 +0100385 table_.Register(kVisitJSFunction, &VisitJSFunctionAndFlushCode);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100386 table_.Register(kVisitSharedFunctionInfo,
387 &VisitSharedFunctionInfoAndFlushCode);
388
Iain Merrick75681382010-08-19 15:07:18 +0100389 } else {
Steve Block791712a2010-08-27 10:21:07 +0100390 table_.Register(kVisitJSFunction, &VisitJSFunction);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100391 table_.Register(kVisitSharedFunctionInfo,
392 &VisitSharedFunctionInfoGeneric);
Iain Merrick75681382010-08-19 15:07:18 +0100393 }
394 }
395
396 static void Initialize() {
397 table_.Register(kVisitShortcutCandidate,
398 &FixedBodyVisitor<StaticMarkingVisitor,
399 ConsString::BodyDescriptor,
400 void>::Visit);
401
402 table_.Register(kVisitConsString,
403 &FixedBodyVisitor<StaticMarkingVisitor,
404 ConsString::BodyDescriptor,
405 void>::Visit);
406
407
408 table_.Register(kVisitFixedArray,
409 &FlexibleBodyVisitor<StaticMarkingVisitor,
410 FixedArray::BodyDescriptor,
411 void>::Visit);
412
Ben Murdochf87a2032010-10-22 12:50:53 +0100413 table_.Register(kVisitGlobalContext,
414 &FixedBodyVisitor<StaticMarkingVisitor,
415 Context::MarkCompactBodyDescriptor,
416 void>::Visit);
417
Iain Merrick75681382010-08-19 15:07:18 +0100418 table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
419 table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
420 table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
421
422 table_.Register(kVisitOddball,
423 &FixedBodyVisitor<StaticMarkingVisitor,
424 Oddball::BodyDescriptor,
425 void>::Visit);
426 table_.Register(kVisitMap,
427 &FixedBodyVisitor<StaticMarkingVisitor,
428 Map::BodyDescriptor,
429 void>::Visit);
430
431 table_.Register(kVisitCode, &VisitCode);
432
Ben Murdochb0fe1622011-05-05 13:52:32 +0100433 table_.Register(kVisitSharedFunctionInfo,
434 &VisitSharedFunctionInfoAndFlushCode);
435
436 table_.Register(kVisitJSFunction,
437 &VisitJSFunctionAndFlushCode);
Iain Merrick75681382010-08-19 15:07:18 +0100438
439 table_.Register(kVisitPropertyCell,
440 &FixedBodyVisitor<StaticMarkingVisitor,
441 JSGlobalPropertyCell::BodyDescriptor,
442 void>::Visit);
443
444 table_.RegisterSpecializations<DataObjectVisitor,
445 kVisitDataObject,
446 kVisitDataObjectGeneric>();
447
448 table_.RegisterSpecializations<JSObjectVisitor,
449 kVisitJSObject,
450 kVisitJSObjectGeneric>();
451
452 table_.RegisterSpecializations<StructObjectVisitor,
453 kVisitStruct,
454 kVisitStructGeneric>();
455 }
456
457 INLINE(static void VisitPointer(Object** p)) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000458 MarkObjectByPointer(p);
459 }
460
Iain Merrick75681382010-08-19 15:07:18 +0100461 INLINE(static void VisitPointers(Object** start, Object** end)) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000462 // Mark all objects pointed to in [start, end).
463 const int kMinRangeForMarkingRecursion = 64;
464 if (end - start >= kMinRangeForMarkingRecursion) {
465 if (VisitUnmarkedObjects(start, end)) return;
466 // We are close to a stack overflow, so just mark the objects.
467 }
468 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
469 }
470
Iain Merrick75681382010-08-19 15:07:18 +0100471 static inline void VisitCodeTarget(RelocInfo* rinfo) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000472 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
473 Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
474 if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
475 IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in Heap::non_monomorphic_cache().
478 } else {
479 MarkCompactCollector::MarkObject(code);
480 }
481 }
482
Ben Murdochb0fe1622011-05-05 13:52:32 +0100483 static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
484 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
485 Object* cell = rinfo->target_cell();
486 Object* old_cell = cell;
487 VisitPointer(&cell);
488 if (cell != old_cell) {
489 rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
490 }
491 }
492
Iain Merrick75681382010-08-19 15:07:18 +0100493 static inline void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +0100494 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
495 rinfo->IsPatchedReturnSequence()) ||
496 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
497 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Blocka7e24c12009-10-30 11:49:00 +0000498 HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
499 MarkCompactCollector::MarkObject(code);
Steve Blocka7e24c12009-10-30 11:49:00 +0000500 }
501
Steve Blocka7e24c12009-10-30 11:49:00 +0000502 // Mark object pointed to by p.
Iain Merrick75681382010-08-19 15:07:18 +0100503 INLINE(static void MarkObjectByPointer(Object** p)) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000504 if (!(*p)->IsHeapObject()) return;
505 HeapObject* object = ShortCircuitConsString(p);
506 MarkCompactCollector::MarkObject(object);
507 }
508
Steve Blocka7e24c12009-10-30 11:49:00 +0000509 // Visit an unmarked object.
Iain Merrick75681382010-08-19 15:07:18 +0100510 static inline void VisitUnmarkedObject(HeapObject* obj) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000511#ifdef DEBUG
512 ASSERT(Heap::Contains(obj));
513 ASSERT(!obj->IsMarked());
514#endif
515 Map* map = obj->map();
516 MarkCompactCollector::SetMark(obj);
517 // Mark the map pointer and the body.
518 MarkCompactCollector::MarkObject(map);
Iain Merrick75681382010-08-19 15:07:18 +0100519 IterateBody(map, obj);
Steve Blocka7e24c12009-10-30 11:49:00 +0000520 }
521
522 // Visit all unmarked objects pointed to by [start, end).
523 // Returns false if the operation fails (lack of stack space).
Iain Merrick75681382010-08-19 15:07:18 +0100524 static inline bool VisitUnmarkedObjects(Object** start, Object** end) {
    // Return false if we are close to the stack limit.
526 StackLimitCheck check;
527 if (check.HasOverflowed()) return false;
528
529 // Visit the unmarked objects.
530 for (Object** p = start; p < end; p++) {
531 if (!(*p)->IsHeapObject()) continue;
532 HeapObject* obj = HeapObject::cast(*p);
533 if (obj->IsMarked()) continue;
534 VisitUnmarkedObject(obj);
535 }
536 return true;
537 }
Iain Merrick75681382010-08-19 15:07:18 +0100538
539 static inline void VisitExternalReference(Address* p) { }
540 static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
541
542 private:
543 class DataObjectVisitor {
544 public:
545 template<int size>
546 static void VisitSpecialized(Map* map, HeapObject* object) {
547 }
548
549 static void Visit(Map* map, HeapObject* object) {
550 }
551 };
552
553 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
554 JSObject::BodyDescriptor,
555 void> JSObjectVisitor;
556
557 typedef FlexibleBodyVisitor<StaticMarkingVisitor,
558 StructBodyDescriptor,
559 void> StructObjectVisitor;
560
561 static void VisitCode(Map* map, HeapObject* object) {
562 reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>();
563 }
564
565 // Code flushing support.
566
  // How many collections a newly compiled code object will survive before
  // being flushed.
569 static const int kCodeAgeThreshold = 5;
570
571 inline static bool HasSourceCode(SharedFunctionInfo* info) {
572 Object* undefined = Heap::raw_unchecked_undefined_value();
573 return (info->script() != undefined) &&
574 (reinterpret_cast<Script*>(info->script())->source() != undefined);
575 }
576
577
578 inline static bool IsCompiled(JSFunction* function) {
579 return
580 function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
581 }
582
583
584 inline static bool IsCompiled(SharedFunctionInfo* function) {
585 return
586 function->unchecked_code() != Builtins::builtin(Builtins::LazyCompile);
587 }
588
Ben Murdochb0fe1622011-05-05 13:52:32 +0100589 inline static bool IsFlushable(JSFunction* function) {
Iain Merrick75681382010-08-19 15:07:18 +0100590 SharedFunctionInfo* shared_info = function->unchecked_shared();
591
    // The code is either on the stack, in the compilation cache, or
    // referenced by an optimized version of the function.
594 if (function->unchecked_code()->IsMarked()) {
595 shared_info->set_code_age(0);
596 return false;
Iain Merrick75681382010-08-19 15:07:18 +0100597 }
598
Ben Murdochb0fe1622011-05-05 13:52:32 +0100599 // We do not flush code for optimized functions.
600 if (function->code() != shared_info->unchecked_code()) {
601 return false;
602 }
603
604 return IsFlushable(shared_info);
605 }
606
607 inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
    // The code is either on the stack, in the compilation cache, or
    // referenced by an optimized version of the function.
Iain Merrick75681382010-08-19 15:07:18 +0100610 if (shared_info->unchecked_code()->IsMarked()) {
611 shared_info->set_code_age(0);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100612 return false;
Iain Merrick75681382010-08-19 15:07:18 +0100613 }
614
615 // The function must be compiled and have the source code available,
616 // to be able to recompile it in case we need the function again.
Ben Murdochb0fe1622011-05-05 13:52:32 +0100617 if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
618 return false;
619 }
Iain Merrick75681382010-08-19 15:07:18 +0100620
621 // We never flush code for Api functions.
622 Object* function_data = shared_info->function_data();
623 if (function_data->IsHeapObject() &&
624 (SafeMap(function_data)->instance_type() ==
625 FUNCTION_TEMPLATE_INFO_TYPE)) {
Ben Murdochb0fe1622011-05-05 13:52:32 +0100626 return false;
Iain Merrick75681382010-08-19 15:07:18 +0100627 }
628
629 // Only flush code for functions.
Ben Murdochb0fe1622011-05-05 13:52:32 +0100630 if (shared_info->code()->kind() != Code::FUNCTION) return false;
Iain Merrick75681382010-08-19 15:07:18 +0100631
632 // Function must be lazy compilable.
Ben Murdochb0fe1622011-05-05 13:52:32 +0100633 if (!shared_info->allows_lazy_compilation()) return false;
Iain Merrick75681382010-08-19 15:07:18 +0100634
    // If this is a full script wrapped in a function we do not flush the code.
Ben Murdochb0fe1622011-05-05 13:52:32 +0100636 if (shared_info->is_toplevel()) return false;
Iain Merrick75681382010-08-19 15:07:18 +0100637
638 // Age this shared function info.
639 if (shared_info->code_age() < kCodeAgeThreshold) {
640 shared_info->set_code_age(shared_info->code_age() + 1);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100641 return false;
Iain Merrick75681382010-08-19 15:07:18 +0100642 }
643
Ben Murdochb0fe1622011-05-05 13:52:32 +0100644 return true;
645 }
646
647
648 static bool FlushCodeForFunction(JSFunction* function) {
649 if (!IsFlushable(function)) return false;
650
651 // This function's code looks flushable. But we have to postpone the
652 // decision until we see all functions that point to the same
653 // SharedFunctionInfo because some of them might be optimized.
654 // That would make the nonoptimized version of the code nonflushable,
655 // because it is required for bailing out from optimized code.
656 FlushCode::AddCandidate(function);
657 return true;
Iain Merrick75681382010-08-19 15:07:18 +0100658 }
659
660
661 static inline Map* SafeMap(Object* obj) {
662 MapWord map_word = HeapObject::cast(obj)->map_word();
663 map_word.ClearMark();
664 map_word.ClearOverflow();
665 return map_word.ToMap();
666 }
667
668
669 static inline bool IsJSBuiltinsObject(Object* obj) {
670 return obj->IsHeapObject() &&
671 (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
672 }
673
674
675 static inline bool IsValidNotBuiltinContext(Object* ctx) {
676 if (!ctx->IsHeapObject()) return false;
677
678 Map* map = SafeMap(ctx);
679 if (!(map == Heap::raw_unchecked_context_map() ||
680 map == Heap::raw_unchecked_catch_context_map() ||
681 map == Heap::raw_unchecked_global_context_map())) {
682 return false;
683 }
684
685 Context* context = reinterpret_cast<Context*>(ctx);
686
687 if (IsJSBuiltinsObject(context->global())) {
688 return false;
689 }
690
691 return true;
692 }
693
694
Ben Murdochb0fe1622011-05-05 13:52:32 +0100695 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
Kristian Monsen0d5e1162010-09-30 15:31:59 +0100696 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100697
698 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
699
Kristian Monsen0d5e1162010-09-30 15:31:59 +0100700 FixedBodyVisitor<StaticMarkingVisitor,
701 SharedFunctionInfo::BodyDescriptor,
702 void>::Visit(map, object);
703 }
704
705
Ben Murdochb0fe1622011-05-05 13:52:32 +0100706 static void VisitSharedFunctionInfoAndFlushCode(Map* map,
707 HeapObject* object) {
708 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
709 }
710
711
712 static void VisitSharedFunctionInfoAndFlushCodeGeneric(
713 Map* map, HeapObject* object, bool known_flush_code_candidate) {
714 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
715
716 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
717
718 if (!known_flush_code_candidate) {
719 known_flush_code_candidate = IsFlushable(shared);
720 if (known_flush_code_candidate) FlushCode::AddCandidate(shared);
721 }
722
723 VisitSharedFunctionInfoFields(object, known_flush_code_candidate);
724 }
725
726
Steve Block791712a2010-08-27 10:21:07 +0100727 static void VisitCodeEntry(Address entry_address) {
728 Object* code = Code::GetObjectFromEntryAddress(entry_address);
729 Object* old_code = code;
730 VisitPointer(&code);
731 if (code != old_code) {
732 Memory::Address_at(entry_address) =
733 reinterpret_cast<Code*>(code)->entry();
734 }
735 }
Iain Merrick75681382010-08-19 15:07:18 +0100736
Steve Block791712a2010-08-27 10:21:07 +0100737
738 static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
739 JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
Iain Merrick75681382010-08-19 15:07:18 +0100740 // The function must have a valid context and not be a builtin.
Ben Murdochb0fe1622011-05-05 13:52:32 +0100741 bool flush_code_candidate = false;
Iain Merrick75681382010-08-19 15:07:18 +0100742 if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
Ben Murdochb0fe1622011-05-05 13:52:32 +0100743 flush_code_candidate = FlushCodeForFunction(jsfunction);
Iain Merrick75681382010-08-19 15:07:18 +0100744 }
Ben Murdochb0fe1622011-05-05 13:52:32 +0100745
746 if (!flush_code_candidate) {
747 MarkCompactCollector::MarkObject(
748 jsfunction->unchecked_shared()->unchecked_code());
749
750 if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
        // For optimized functions we should retain both the non-optimized
        // version of its code and the non-optimized version of all inlined
        // functions. This is required to support bailing out from inlined
        // code.
754 DeoptimizationInputData* data =
755 reinterpret_cast<DeoptimizationInputData*>(
756 jsfunction->unchecked_code()->unchecked_deoptimization_data());
757
758 FixedArray* literals = data->UncheckedLiteralArray();
759
760 for (int i = 0, count = data->InlinedFunctionCount()->value();
761 i < count;
762 i++) {
763 JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
764 MarkCompactCollector::MarkObject(
765 inlined->unchecked_shared()->unchecked_code());
766 }
767 }
768 }
769
770 VisitJSFunctionFields(map,
771 reinterpret_cast<JSFunction*>(object),
772 flush_code_candidate);
Iain Merrick75681382010-08-19 15:07:18 +0100773 }
774
Steve Block791712a2010-08-27 10:21:07 +0100775
776 static void VisitJSFunction(Map* map, HeapObject* object) {
Ben Murdochb0fe1622011-05-05 13:52:32 +0100777 VisitJSFunctionFields(map,
778 reinterpret_cast<JSFunction*>(object),
779 false);
780 }
Steve Block791712a2010-08-27 10:21:07 +0100781
Ben Murdochb0fe1622011-05-05 13:52:32 +0100782
783#define SLOT_ADDR(obj, offset) \
784 reinterpret_cast<Object**>((obj)->address() + offset)
785
786
787 static inline void VisitJSFunctionFields(Map* map,
788 JSFunction* object,
789 bool flush_code_candidate) {
Steve Block791712a2010-08-27 10:21:07 +0100790 VisitPointers(SLOT_ADDR(object, JSFunction::kPropertiesOffset),
791 SLOT_ADDR(object, JSFunction::kCodeEntryOffset));
792
Ben Murdochb0fe1622011-05-05 13:52:32 +0100793 if (!flush_code_candidate) {
794 VisitCodeEntry(object->address() + JSFunction::kCodeEntryOffset);
795 } else {
796 // Don't visit code object.
797
      // Visit the shared function info to avoid double-checking its
      // flushability.
800 SharedFunctionInfo* shared_info = object->unchecked_shared();
801 if (!shared_info->IsMarked()) {
802 Map* shared_info_map = shared_info->map();
803 MarkCompactCollector::SetMark(shared_info);
804 MarkCompactCollector::MarkObject(shared_info_map);
805 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
806 shared_info,
807 true);
808 }
809 }
Steve Block791712a2010-08-27 10:21:07 +0100810
811 VisitPointers(SLOT_ADDR(object,
812 JSFunction::kCodeEntryOffset + kPointerSize),
Ben Murdochb0fe1622011-05-05 13:52:32 +0100813 SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));
Ben Murdochf87a2032010-10-22 12:50:53 +0100814
Ben Murdochb0fe1622011-05-05 13:52:32 +0100815 // Don't visit the next function list field as it is a weak reference.
Steve Block791712a2010-08-27 10:21:07 +0100816 }
817
818
Ben Murdochb0fe1622011-05-05 13:52:32 +0100819 static void VisitSharedFunctionInfoFields(HeapObject* object,
820 bool flush_code_candidate) {
821 VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));
822
823 if (!flush_code_candidate) {
824 VisitPointer(SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
825 }
826
827 VisitPointers(SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
828 SLOT_ADDR(object, SharedFunctionInfo::kSize));
829 }
830
831 #undef SLOT_ADDR
832
Iain Merrick75681382010-08-19 15:07:18 +0100833 typedef void (*Callback)(Map* map, HeapObject* object);
834
835 static VisitorDispatchTable<Callback> table_;
Steve Blocka7e24c12009-10-30 11:49:00 +0000836};
837
838
Iain Merrick75681382010-08-19 15:07:18 +0100839VisitorDispatchTable<StaticMarkingVisitor::Callback>
840 StaticMarkingVisitor::table_;
841
842
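// ObjectVisitor adapter that forwards all visits to StaticMarkingVisitor,
// for callers that need an ObjectVisitor instance (e.g. the symbol table
// prefix iteration in MarkSymbolTable).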
843class MarkingVisitor : public ObjectVisitor {
844 public:
845 void VisitPointer(Object** p) {
846 StaticMarkingVisitor::VisitPointer(p);
847 }
848
849 void VisitPointers(Object** start, Object** end) {
850 StaticMarkingVisitor::VisitPointers(start, end);
851 }
852
853 void VisitCodeTarget(RelocInfo* rinfo) {
854 StaticMarkingVisitor::VisitCodeTarget(rinfo);
855 }
856
Ben Murdochb0fe1622011-05-05 13:52:32 +0100857 void VisitGlobalPropertyCell(RelocInfo* rinfo) {
858 StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
859 }
860
Iain Merrick75681382010-08-19 15:07:18 +0100861 void VisitDebugTarget(RelocInfo* rinfo) {
862 StaticMarkingVisitor::VisitDebugTarget(rinfo);
863 }
864};
865
866
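// Marks the code objects of all frames on an archived thread's stack so
// that code currently in use is not flushed.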
867class CodeMarkingVisitor : public ThreadVisitor {
868 public:
869 void VisitThread(ThreadLocalTop* top) {
870 for (StackFrameIterator it(top); !it.done(); it.Advance()) {
871 MarkCompactCollector::MarkObject(it.frame()->unchecked_code());
872 }
873 }
874};
875
876
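// Marks SharedFunctionInfo objects (and their code) reachable from the
// compilation cache and from handle scopes so that they are not flushed.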
877class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
878 public:
879 void VisitPointers(Object** start, Object** end) {
880 for (Object** p = start; p < end; p++) VisitPointer(p);
881 }
882
883 void VisitPointer(Object** slot) {
884 Object* obj = *slot;
Ben Murdochb0fe1622011-05-05 13:52:32 +0100885 if (obj->IsSharedFunctionInfo()) {
886 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
887 MarkCompactCollector::MarkObject(shared->unchecked_code());
888 MarkCompactCollector::MarkObject(shared);
Iain Merrick75681382010-08-19 15:07:18 +0100889 }
890 }
891};
892
893
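// Decide whether code flushing is enabled for this collection and, when it
// is, mark the code that must not be flushed: code referenced from the
// current and archived stacks, from the compilation cache and from handle
// scopes.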
894void MarkCompactCollector::PrepareForCodeFlushing() {
895 if (!FLAG_flush_code) {
896 StaticMarkingVisitor::EnableCodeFlushing(false);
897 return;
898 }
899
900#ifdef ENABLE_DEBUGGER_SUPPORT
901 if (Debug::IsLoaded() || Debug::has_break_points()) {
902 StaticMarkingVisitor::EnableCodeFlushing(false);
903 return;
904 }
905#endif
906 StaticMarkingVisitor::EnableCodeFlushing(true);
907
Iain Merrick9ac36c92010-09-13 15:29:50 +0100908 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
909 // relies on it being marked before any other descriptor array.
910 MarkObject(Heap::raw_unchecked_empty_descriptor_array());
911
Iain Merrick75681382010-08-19 15:07:18 +0100912 // Make sure we are not referencing the code from the stack.
913 for (StackFrameIterator it; !it.done(); it.Advance()) {
Iain Merrick9ac36c92010-09-13 15:29:50 +0100914 MarkObject(it.frame()->unchecked_code());
Iain Merrick75681382010-08-19 15:07:18 +0100915 }
916
917 // Iterate the archived stacks in all threads to check if
918 // the code is referenced.
919 CodeMarkingVisitor code_marking_visitor;
920 ThreadManager::IterateArchivedThreads(&code_marking_visitor);
921
922 SharedFunctionInfoMarkingVisitor visitor;
923 CompilationCache::IterateFunctions(&visitor);
Ben Murdochb0fe1622011-05-05 13:52:32 +0100924 HandleScopeImplementer::Iterate(&visitor);
Iain Merrick75681382010-08-19 15:07:18 +0100925
Iain Merrick9ac36c92010-09-13 15:29:50 +0100926 ProcessMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +0100927}
928
929
Steve Blocka7e24c12009-10-30 11:49:00 +0000930// Visitor class for marking heap roots.
931class RootMarkingVisitor : public ObjectVisitor {
932 public:
933 void VisitPointer(Object** p) {
934 MarkObjectByPointer(p);
935 }
936
937 void VisitPointers(Object** start, Object** end) {
938 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
939 }
940
Steve Blocka7e24c12009-10-30 11:49:00 +0000941 private:
Steve Blocka7e24c12009-10-30 11:49:00 +0000942 void MarkObjectByPointer(Object** p) {
943 if (!(*p)->IsHeapObject()) return;
944
945 // Replace flat cons strings in place.
946 HeapObject* object = ShortCircuitConsString(p);
947 if (object->IsMarked()) return;
948
949 Map* map = object->map();
950 // Mark the object.
951 MarkCompactCollector::SetMark(object);
Iain Merrick75681382010-08-19 15:07:18 +0100952
Steve Blocka7e24c12009-10-30 11:49:00 +0000953 // Mark the map pointer and body, and push them on the marking stack.
954 MarkCompactCollector::MarkObject(map);
Iain Merrick75681382010-08-19 15:07:18 +0100955 StaticMarkingVisitor::IterateBody(map, object);
Steve Blocka7e24c12009-10-30 11:49:00 +0000956
957 // Mark all the objects reachable from the map and body. May leave
958 // overflowed objects in the heap.
Iain Merrick75681382010-08-19 15:07:18 +0100959 MarkCompactCollector::EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +0000960 }
961};
962
963
964// Helper class for pruning the symbol table.
965class SymbolTableCleaner : public ObjectVisitor {
966 public:
967 SymbolTableCleaner() : pointers_removed_(0) { }
Leon Clarkee46be812010-01-19 14:06:41 +0000968
969 virtual void VisitPointers(Object** start, Object** end) {
Steve Blocka7e24c12009-10-30 11:49:00 +0000970 // Visit all HeapObject pointers in [start, end).
971 for (Object** p = start; p < end; p++) {
972 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
973 // Check if the symbol being pruned is an external symbol. We need to
974 // delete the associated external data as this symbol is going away.
975
Steve Blocka7e24c12009-10-30 11:49:00 +0000976 // Since no objects have yet been moved we can safely access the map of
977 // the object.
Leon Clarkee46be812010-01-19 14:06:41 +0000978 if ((*p)->IsExternalString()) {
979 Heap::FinalizeExternalString(String::cast(*p));
Steve Blocka7e24c12009-10-30 11:49:00 +0000980 }
981 // Set the entry to null_value (as deleted).
982 *p = Heap::raw_unchecked_null_value();
983 pointers_removed_++;
984 }
985 }
986 }
987
988 int PointersRemoved() {
989 return pointers_removed_;
990 }
991 private:
992 int pointers_removed_;
993};
994
995
// Implementation of WeakObjectRetainer for mark-compact GCs. All marked
// objects are retained.
998class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
999 public:
1000 virtual Object* RetainAs(Object* object) {
1001 MapWord first_word = HeapObject::cast(object)->map_word();
1002 if (first_word.IsMarked()) {
1003 return object;
1004 } else {
1005 return NULL;
1006 }
1007 }
1008};
1009
1010
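// Mark a previously unmarked object and push it on the marking stack. Maps
// are treated specially: their code caches may be cleared, and with
// FLAG_collect_maps the contents of JSObject-related maps are marked via
// MarkMapContents instead of pushing the map on the stack.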
Steve Blocka7e24c12009-10-30 11:49:00 +00001011void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
1012 ASSERT(!object->IsMarked());
1013 ASSERT(Heap::Contains(object));
1014 if (object->IsMap()) {
1015 Map* map = Map::cast(object);
1016 if (FLAG_cleanup_caches_in_maps_at_gc) {
1017 map->ClearCodeCache();
1018 }
1019 SetMark(map);
1020 if (FLAG_collect_maps &&
1021 map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1022 map->instance_type() <= JS_FUNCTION_TYPE) {
1023 MarkMapContents(map);
1024 } else {
1025 marking_stack.Push(map);
1026 }
1027 } else {
1028 SetMark(object);
1029 marking_stack.Push(object);
1030 }
1031}
1032
1033
1034void MarkCompactCollector::MarkMapContents(Map* map) {
1035 MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
1036 *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));
1037
1038 // Mark the Object* fields of the Map.
1039 // Since the descriptor array has been marked already, it is fine
1040 // that one of these fields contains a pointer to it.
Iain Merrick75681382010-08-19 15:07:18 +01001041 Object** start_slot = HeapObject::RawField(map,
1042 Map::kPointerFieldsBeginOffset);
1043
1044 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);
1045
1046 StaticMarkingVisitor::VisitPointers(start_slot, end_slot);
Steve Blocka7e24c12009-10-30 11:49:00 +00001047}
1048
1049
1050void MarkCompactCollector::MarkDescriptorArray(
1051 DescriptorArray* descriptors) {
1052 if (descriptors->IsMarked()) return;
1053 // Empty descriptor array is marked as a root before any maps are marked.
1054 ASSERT(descriptors != Heap::raw_unchecked_empty_descriptor_array());
1055 SetMark(descriptors);
1056
1057 FixedArray* contents = reinterpret_cast<FixedArray*>(
1058 descriptors->get(DescriptorArray::kContentArrayIndex));
1059 ASSERT(contents->IsHeapObject());
1060 ASSERT(!contents->IsMarked());
1061 ASSERT(contents->IsFixedArray());
1062 ASSERT(contents->length() >= 2);
1063 SetMark(contents);
Iain Merrick75681382010-08-19 15:07:18 +01001064 // Contents contains (value, details) pairs. If the details say that
1065 // the type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION, or
1066 // NULL_DESCRIPTOR, we don't mark the value as live. Only for
1067 // MAP_TRANSITION and CONSTANT_TRANSITION is the value an Object* (a
1068 // Map*).
Steve Blocka7e24c12009-10-30 11:49:00 +00001069 for (int i = 0; i < contents->length(); i += 2) {
1070 // If the pair (value, details) at index i, i+1 is not
1071 // a transition or null descriptor, mark the value.
1072 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1073 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
1074 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
1075 if (object->IsHeapObject() && !object->IsMarked()) {
1076 SetMark(object);
1077 marking_stack.Push(object);
1078 }
1079 }
1080 }
1081 // The DescriptorArray descriptors contains a pointer to its contents array,
1082 // but the contents array is already marked.
1083 marking_stack.Push(descriptors);
1084}
1085
1086
1087void MarkCompactCollector::CreateBackPointers() {
1088 HeapObjectIterator iterator(Heap::map_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00001089 for (HeapObject* next_object = iterator.next();
1090 next_object != NULL; next_object = iterator.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001091 if (next_object->IsMap()) { // Could also be ByteArray on free list.
1092 Map* map = Map::cast(next_object);
1093 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1094 map->instance_type() <= JS_FUNCTION_TYPE) {
1095 map->CreateBackPointers();
1096 } else {
1097 ASSERT(map->instance_descriptors() == Heap::empty_descriptor_array());
1098 }
1099 }
1100 }
1101}
1102
1103
1104static int OverflowObjectSize(HeapObject* obj) {
1105 // Recover the normal map pointer, it might be marked as live and
1106 // overflowed.
1107 MapWord map_word = obj->map_word();
1108 map_word.ClearMark();
1109 map_word.ClearOverflow();
1110 return obj->SizeFromMap(map_word.ToMap());
1111}
1112
1113
1114// Fill the marking stack with overflowed objects returned by the given
1115// iterator. Stop when the marking stack is filled or the end of the space
1116// is reached, whichever comes first.
1117template<class T>
1118static void ScanOverflowedObjects(T* it) {
1119 // The caller should ensure that the marking stack is initially not full,
1120 // so that we don't waste effort pointlessly scanning for objects.
1121 ASSERT(!marking_stack.is_full());
1122
Leon Clarked91b9f72010-01-27 17:25:45 +00001123 for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001124 if (object->IsOverflowed()) {
1125 object->ClearOverflow();
1126 ASSERT(object->IsMarked());
1127 ASSERT(Heap::Contains(object));
1128 marking_stack.Push(object);
1129 if (marking_stack.is_full()) return;
1130 }
1131 }
1132}
1133
1134
1135bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1136 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
1137}
1138
1139
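// Mark the symbol table and everything reachable from its prefix. Dead
// symbol table entries are pruned later by SymbolTableCleaner.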
Steve Blocka7e24c12009-10-30 11:49:00 +00001140void MarkCompactCollector::MarkSymbolTable() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001141 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
1142 // Mark the symbol table itself.
1143 SetMark(symbol_table);
1144 // Explicitly mark the prefix.
1145 MarkingVisitor marker;
1146 symbol_table->IteratePrefix(&marker);
Iain Merrick75681382010-08-19 15:07:18 +01001147 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001148}
1149
1150
1151void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1152 // Mark the heap roots including global variables, stack variables,
1153 // etc., and all objects reachable from them.
Steve Blockd0582a62009-12-15 09:54:21 +00001154 Heap::IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00001155
1156 // Handle the symbol table specially.
1157 MarkSymbolTable();
1158
1159 // There may be overflowed objects in the heap. Visit them now.
1160 while (marking_stack.overflowed()) {
1161 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001162 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001163 }
1164}
1165
1166
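// Mark all members of object groups that contain at least one marked
// object; groups that have been fully marked are dropped so they are not
// processed again.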
1167void MarkCompactCollector::MarkObjectGroups() {
1168 List<ObjectGroup*>* object_groups = GlobalHandles::ObjectGroups();
1169
1170 for (int i = 0; i < object_groups->length(); i++) {
1171 ObjectGroup* entry = object_groups->at(i);
1172 if (entry == NULL) continue;
1173
1174 List<Object**>& objects = entry->objects_;
1175 bool group_marked = false;
1176 for (int j = 0; j < objects.length(); j++) {
1177 Object* object = *objects[j];
1178 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1179 group_marked = true;
1180 break;
1181 }
1182 }
1183
1184 if (!group_marked) continue;
1185
1186 // An object in the group is marked, so mark as gray all white heap
1187 // objects in the group.
1188 for (int j = 0; j < objects.length(); ++j) {
1189 if ((*objects[j])->IsHeapObject()) {
1190 MarkObject(HeapObject::cast(*objects[j]));
1191 }
1192 }
1193 // Once the entire group has been colored gray, set the object group
1194 // to NULL so it won't be processed again.
1195 delete object_groups->at(i);
1196 object_groups->at(i) = NULL;
1197 }
1198}
1199
1200
1201// Mark all objects reachable from the objects on the marking stack.
1202// Before: the marking stack contains zero or more heap object pointers.
1203// After: the marking stack is empty, and all objects reachable from the
1204// marking stack have been marked, or are overflowed in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001205void MarkCompactCollector::EmptyMarkingStack() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001206 while (!marking_stack.is_empty()) {
1207 HeapObject* object = marking_stack.Pop();
1208 ASSERT(object->IsHeapObject());
1209 ASSERT(Heap::Contains(object));
1210 ASSERT(object->IsMarked());
1211 ASSERT(!object->IsOverflowed());
1212
1213 // Because the object is marked, we have to recover the original map
1214 // pointer and use it to mark the object's body.
1215 MapWord map_word = object->map_word();
1216 map_word.ClearMark();
1217 Map* map = map_word.ToMap();
1218 MarkObject(map);
Iain Merrick75681382010-08-19 15:07:18 +01001219
1220 StaticMarkingVisitor::IterateBody(map, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001221 }
1222}
1223
1224
// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack. Stop early if the marking stack fills
// before sweeping completes. If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
1230void MarkCompactCollector::RefillMarkingStack() {
1231 ASSERT(marking_stack.overflowed());
1232
1233 SemiSpaceIterator new_it(Heap::new_space(), &OverflowObjectSize);
1234 ScanOverflowedObjects(&new_it);
1235 if (marking_stack.is_full()) return;
1236
1237 HeapObjectIterator old_pointer_it(Heap::old_pointer_space(),
1238 &OverflowObjectSize);
1239 ScanOverflowedObjects(&old_pointer_it);
1240 if (marking_stack.is_full()) return;
1241
1242 HeapObjectIterator old_data_it(Heap::old_data_space(), &OverflowObjectSize);
1243 ScanOverflowedObjects(&old_data_it);
1244 if (marking_stack.is_full()) return;
1245
1246 HeapObjectIterator code_it(Heap::code_space(), &OverflowObjectSize);
1247 ScanOverflowedObjects(&code_it);
1248 if (marking_stack.is_full()) return;
1249
1250 HeapObjectIterator map_it(Heap::map_space(), &OverflowObjectSize);
1251 ScanOverflowedObjects(&map_it);
1252 if (marking_stack.is_full()) return;
1253
1254 HeapObjectIterator cell_it(Heap::cell_space(), &OverflowObjectSize);
1255 ScanOverflowedObjects(&cell_it);
1256 if (marking_stack.is_full()) return;
1257
1258 LargeObjectIterator lo_it(Heap::lo_space(), &OverflowObjectSize);
1259 ScanOverflowedObjects(&lo_it);
1260 if (marking_stack.is_full()) return;
1261
1262 marking_stack.clear_overflowed();
1263}
1264
1265
1266// Mark all objects reachable (transitively) from objects on the marking
1267// stack. Before: the marking stack contains zero or more heap object
1268// pointers. After: the marking stack is empty and there are no overflowed
1269// objects in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001270void MarkCompactCollector::ProcessMarkingStack() {
1271 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001272 while (marking_stack.overflowed()) {
1273 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001274 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001275 }
1276}
1277
1278
Iain Merrick75681382010-08-19 15:07:18 +01001279void MarkCompactCollector::ProcessObjectGroups() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001280 bool work_to_do = true;
1281 ASSERT(marking_stack.is_empty());
1282 while (work_to_do) {
1283 MarkObjectGroups();
1284 work_to_do = !marking_stack.is_empty();
Iain Merrick75681382010-08-19 15:07:18 +01001285 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001286 }
1287}
1288
1289
1290void MarkCompactCollector::MarkLiveObjects() {
Leon Clarkef7060e22010-06-03 12:02:55 +01001291 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001292 // The recursive GC marker detects when it is nearing stack overflow,
1293 // and switches to a different marking system. JS interrupts interfere
1294 // with the C stack limit check.
1295 PostponeInterruptsScope postpone;
1296
Steve Blocka7e24c12009-10-30 11:49:00 +00001297#ifdef DEBUG
1298 ASSERT(state_ == PREPARE_GC);
1299 state_ = MARK_LIVE_OBJECTS;
1300#endif
1301 // The to space contains live objects, the from space is used as a marking
1302 // stack.
1303 marking_stack.Initialize(Heap::new_space()->FromSpaceLow(),
1304 Heap::new_space()->FromSpaceHigh());
1305
1306 ASSERT(!marking_stack.overflowed());
1307
Iain Merrick75681382010-08-19 15:07:18 +01001308 PrepareForCodeFlushing();
1309
Steve Blocka7e24c12009-10-30 11:49:00 +00001310 RootMarkingVisitor root_visitor;
1311 MarkRoots(&root_visitor);
1312
1313 // The objects reachable from the roots are marked, yet unreachable
1314 // objects are unmarked. Mark objects reachable from object groups
1315 // containing at least one marked object, and continue until no new
1316 // objects are reachable from the object groups.
Iain Merrick75681382010-08-19 15:07:18 +01001317 ProcessObjectGroups();
Steve Blocka7e24c12009-10-30 11:49:00 +00001318
1319 // The objects reachable from the roots or object groups are marked,
1320 // yet unreachable objects are unmarked. Mark objects reachable
1321 // only from weak global handles.
1322 //
1323 // First we identify nonlive weak handles and mark them as pending
1324 // destruction.
1325 GlobalHandles::IdentifyWeakHandles(&IsUnmarkedHeapObject);
1326 // Then we mark the objects and process the transitive closure.
1327 GlobalHandles::IterateWeakRoots(&root_visitor);
1328 while (marking_stack.overflowed()) {
1329 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001330 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001331 }
1332
1333 // Repeat the object groups to mark unmarked groups reachable from the
1334 // weak roots.
Iain Merrick75681382010-08-19 15:07:18 +01001335 ProcessObjectGroups();
Steve Blocka7e24c12009-10-30 11:49:00 +00001336
1337 // Prune the symbol table removing all symbols only pointed to by the
1338 // symbol table. Cannot use symbol_table() here because the symbol
1339 // table is marked.
1340 SymbolTable* symbol_table = Heap::raw_unchecked_symbol_table();
1341 SymbolTableCleaner v;
1342 symbol_table->IterateElements(&v);
1343 symbol_table->ElementsRemoved(v.PointersRemoved());
Leon Clarkee46be812010-01-19 14:06:41 +00001344 ExternalStringTable::Iterate(&v);
1345 ExternalStringTable::CleanUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001346
Ben Murdochf87a2032010-10-22 12:50:53 +01001347 // Process the weak references.
1348 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
1349 Heap::ProcessWeakReferences(&mark_compact_object_retainer);
1350
Steve Blocka7e24c12009-10-30 11:49:00 +00001351 // Remove object groups after marking phase.
1352 GlobalHandles::RemoveObjectGroups();
Ben Murdochb0fe1622011-05-05 13:52:32 +01001353
1354 // Flush code from collected candidates.
1355 FlushCode::ProcessCandidates();
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001356
1357 // Clean up dead objects from the runtime profiler.
1358 RuntimeProfiler::RemoveDeadSamples();
Steve Blocka7e24c12009-10-30 11:49:00 +00001359}
1360
1361
Steve Blocka7e24c12009-10-30 11:49:00 +00001362#ifdef DEBUG
1363void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1364 live_bytes_ += obj->Size();
1365 if (Heap::new_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001366 live_young_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001367 } else if (Heap::map_space()->Contains(obj)) {
1368 ASSERT(obj->IsMap());
Steve Block6ded16b2010-05-10 14:33:55 +01001369 live_map_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001370 } else if (Heap::cell_space()->Contains(obj)) {
1371 ASSERT(obj->IsJSGlobalPropertyCell());
Steve Block6ded16b2010-05-10 14:33:55 +01001372 live_cell_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001373 } else if (Heap::old_pointer_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001374 live_old_pointer_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001375 } else if (Heap::old_data_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001376 live_old_data_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001377 } else if (Heap::code_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001378 live_code_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001379 } else if (Heap::lo_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001380 live_lo_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001381 } else {
1382 UNREACHABLE();
1383 }
1384}
1385#endif // DEBUG
1386
1387
1388void MarkCompactCollector::SweepLargeObjectSpace() {
1389#ifdef DEBUG
1390 ASSERT(state_ == MARK_LIVE_OBJECTS);
1391 state_ =
1392 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1393#endif
1394 // Deallocate unmarked objects and clear marked bits for marked objects.
1395 Heap::lo_space()->FreeUnmarkedObjects();
1396}
1397
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001398
Steve Blocka7e24c12009-10-30 11:49:00 +00001399// Safe to use during marking phase only.
1400bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1401 MapWord metamap = object->map_word();
1402 metamap.ClearMark();
1403 return metamap.ToMap()->instance_type() == MAP_TYPE;
1404}
1405
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001406
Steve Blocka7e24c12009-10-30 11:49:00 +00001407void MarkCompactCollector::ClearNonLiveTransitions() {
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001408 HeapObjectIterator map_iterator(Heap::map_space(), &SizeOfMarkedObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00001409 // Iterate over the map space, setting map transitions that go from
1410 // a marked map to an unmarked map to null transitions. At the same time,
1411 // set all the prototype fields of maps back to their original value,
1412 // dropping the back pointers temporarily stored in the prototype field.
1413 // Setting the prototype field requires following the linked list of
1414 // back pointers, reversing them all at once. This allows us to find
1415 // those maps with map transitions that need to be nulled, and only
1416 // scan the descriptor arrays of those maps, not all maps.
Leon Clarkee46be812010-01-19 14:06:41 +00001417 // All of these actions are carried out only on maps of JSObjects
Steve Blocka7e24c12009-10-30 11:49:00 +00001418 // and related subtypes.
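  //
  // Illustrative example (the exact layout is assumed here for exposition
  // only): during marking the prototype field of each such map temporarily
  // holds a back pointer, so a chain looks like
  //
  //   map1->prototype() == map2, ..., mapN->prototype() == real_prototype,
  //
  // where real_prototype is a non-map object.  The loops below first walk a
  // chain to find real_prototype, then walk it again, restoring each
  // prototype field and clearing, in live maps, transitions that lead to
  // dead maps.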
Leon Clarked91b9f72010-01-27 17:25:45 +00001419 for (HeapObject* obj = map_iterator.next();
1420 obj != NULL; obj = map_iterator.next()) {
1421 Map* map = reinterpret_cast<Map*>(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001422 if (!map->IsMarked() && map->IsByteArray()) continue;
1423
1424 ASSERT(SafeIsMap(map));
1425 // Only JSObject and subtypes have map transitions and back pointers.
1426 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1427 if (map->instance_type() > JS_FUNCTION_TYPE) continue;
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001428
1429 if (map->IsMarked() && map->attached_to_shared_function_info()) {
1430 // This map is used for inobject slack tracking and has been detached
1431 // from SharedFunctionInfo during the mark phase.
1432 // Since it survived the GC, reattach it now.
1433 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1434 }
1435
Steve Blocka7e24c12009-10-30 11:49:00 +00001436 // Follow the chain of back pointers to find the prototype.
1437 Map* current = map;
1438 while (SafeIsMap(current)) {
1439 current = reinterpret_cast<Map*>(current->prototype());
1440 ASSERT(current->IsHeapObject());
1441 }
1442 Object* real_prototype = current;
1443
1444 // Follow back pointers, setting them to prototype,
1445 // clearing map transitions when necessary.
1446 current = map;
1447 bool on_dead_path = !current->IsMarked();
1448 Object* next;
1449 while (SafeIsMap(current)) {
1450 next = current->prototype();
1451 // There should never be a dead map above a live map.
1452 ASSERT(on_dead_path || current->IsMarked());
1453
1454 // A live map above a dead map indicates a dead transition.
1455 // This test will always be false on the first iteration.
1456 if (on_dead_path && current->IsMarked()) {
1457 on_dead_path = false;
1458 current->ClearNonLiveTransitions(real_prototype);
1459 }
1460 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1461 real_prototype;
1462 current = reinterpret_cast<Map*>(next);
1463 }
1464 }
1465}
1466
1467// -------------------------------------------------------------------------
1468// Phase 2: Encode forwarding addresses.
1469// When compacting, forwarding addresses for objects in old space and map
1470// space are encoded in their map pointer word (along with an encoding of
1471// their map pointers).
1472//
// The exact encoding is described in the comments for class MapWord in
1474// objects.h.
Steve Blocka7e24c12009-10-30 11:49:00 +00001475//
1476// An address range [start, end) can have both live and non-live objects.
1477// Maximal non-live regions are marked so they can be skipped on subsequent
1478// sweeps of the heap. A distinguished map-pointer encoding is used to mark
1479// free regions of one-word size (in which case the next word is the start
1480// of a live object). A second distinguished map-pointer encoding is used
1481// to mark free regions larger than one word, and the size of the free
1482// region (including the first word) is written to the second word of the
1483// region.
1484//
1485// Any valid map page offset must lie in the object area of the page, so map
1486// page offsets less than Page::kObjectStartOffset are invalid. We use a
1487// pair of distinguished invalid map encodings (for single word and multiple
1488// words) to indicate free regions in the page found during computation of
1489// forwarding addresses and skipped over in subsequent sweeps.
Steve Blocka7e24c12009-10-30 11:49:00 +00001490
1491
1492// Encode a free region, defined by the given start address and size, in the
1493// first word or two of the region.
1494void EncodeFreeRegion(Address free_start, int free_size) {
1495 ASSERT(free_size >= kIntSize);
1496 if (free_size == kIntSize) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001497 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001498 } else {
1499 ASSERT(free_size >= 2 * kIntSize);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001500 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001501 Memory::int_at(free_start + kIntSize) = free_size;
1502 }
1503
1504#ifdef DEBUG
1505 // Zap the body of the free region.
1506 if (FLAG_enable_slow_asserts) {
1507 for (int offset = 2 * kIntSize;
1508 offset < free_size;
1509 offset += kPointerSize) {
1510 Memory::Address_at(free_start + offset) = kZapValue;
1511 }
1512 }
1513#endif
1514}
1515
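// A minimal sketch (illustrative only, not used by the collector) of how a
// later sweep decodes these free-region encodings to skip dead areas; the
// same scheme is used by IterateLiveObjectsInRange below.
//
//   Address current = start;
//   while (current < end) {
//     uint32_t encoded_map = Memory::uint32_at(current);
//     if (encoded_map == MarkCompactCollector::kSingleFreeEncoding) {
//       current += kPointerSize;                        // One-word free region.
//     } else if (encoded_map == MarkCompactCollector::kMultiFreeEncoding) {
//       current += Memory::int_at(current + kIntSize);  // Size in second word.
//     } else {
//       current += HeapObject::FromAddress(current)->Size();  // Live object.
//     }
//   }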
1516
// Try to promote all objects in new space.  Heap numbers and sequential
// strings are promoted to old data space, large objects to large object
// space, and all others to old pointer space.
John Reck59135872010-11-02 12:39:01 -07001520inline MaybeObject* MCAllocateFromNewSpace(HeapObject* object,
1521 int object_size) {
1522 MaybeObject* forwarded;
Steve Blocka7e24c12009-10-30 11:49:00 +00001523 if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
1524 forwarded = Failure::Exception();
1525 } else {
1526 OldSpace* target_space = Heap::TargetSpace(object);
1527 ASSERT(target_space == Heap::old_pointer_space() ||
1528 target_space == Heap::old_data_space());
1529 forwarded = target_space->MCAllocateRaw(object_size);
1530 }
John Reck59135872010-11-02 12:39:01 -07001531 Object* result;
1532 if (!forwarded->ToObject(&result)) {
1533 result = Heap::new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
Steve Blocka7e24c12009-10-30 11:49:00 +00001534 }
John Reck59135872010-11-02 12:39:01 -07001535 return result;
Steve Blocka7e24c12009-10-30 11:49:00 +00001536}
1537
1538
1539// Allocation functions for the paged spaces call the space's MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001540MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
1541 HeapObject* ignore,
1542 int object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001543 return Heap::old_pointer_space()->MCAllocateRaw(object_size);
1544}
1545
1546
John Reck59135872010-11-02 12:39:01 -07001547MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
1548 HeapObject* ignore,
1549 int object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001550 return Heap::old_data_space()->MCAllocateRaw(object_size);
1551}
1552
1553
John Reck59135872010-11-02 12:39:01 -07001554MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
1555 HeapObject* ignore,
1556 int object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001557 return Heap::code_space()->MCAllocateRaw(object_size);
1558}
1559
1560
John Reck59135872010-11-02 12:39:01 -07001561MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
1562 HeapObject* ignore,
1563 int object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001564 return Heap::map_space()->MCAllocateRaw(object_size);
1565}
1566
1567
Ben Murdochb0fe1622011-05-05 13:52:32 +01001568MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(HeapObject* ignore,
1569 int object_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001570 return Heap::cell_space()->MCAllocateRaw(object_size);
1571}
1572
1573
1574// The forwarding address is encoded at the same offset as the current
1575// to-space object, but in from space.
1576inline void EncodeForwardingAddressInNewSpace(HeapObject* old_object,
1577 int object_size,
1578 Object* new_object,
1579 int* ignored) {
1580 int offset =
1581 Heap::new_space()->ToSpaceOffsetForAddress(old_object->address());
1582 Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset) =
1583 HeapObject::cast(new_object)->address();
1584}
1585
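// Sketch (illustrative only) of the matching lookup performed later, for
// example by UpdatingVisitor::UpdatePointer, for an object in new space:
//
//   Address old_addr = obj->address();
//   Address forwarding_pointer_addr =
//       Heap::new_space()->FromSpaceLow() +
//       Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
//   Address new_addr = Memory::Address_at(forwarding_pointer_addr);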
1586
1587// The forwarding address is encoded in the map pointer of the object as an
1588// offset (in terms of live bytes) from the address of the first live object
1589// in the page.
1590inline void EncodeForwardingAddressInPagedSpace(HeapObject* old_object,
1591 int object_size,
1592 Object* new_object,
1593 int* offset) {
1594 // Record the forwarding address of the first live object if necessary.
1595 if (*offset == 0) {
1596 Page::FromAddress(old_object->address())->mc_first_forwarded =
1597 HeapObject::cast(new_object)->address();
1598 }
1599
1600 MapWord encoding =
1601 MapWord::EncodeAddress(old_object->map()->address(), *offset);
1602 old_object->set_map_word(encoding);
1603 *offset += object_size;
1604 ASSERT(*offset <= Page::kObjectAreaSize);
1605}
1606
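// Sketch (illustrative only) of how this encoding is reversed later by
// GetForwardingAddressInOldSpace: the live-byte offset stored in the map
// word is added to the forwarding address of the page's first live object
// (ignoring the case, handled there, where the result spills onto the next
// page).
//
//   MapWord encoding = obj->map_word();
//   int offset = encoding.DecodeOffset();
//   Address first_forwarded =
//       Page::FromAddress(obj->address())->mc_first_forwarded;
//   Address new_addr = first_forwarded + offset;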
1607
1608// Most non-live objects are ignored.
1609inline void IgnoreNonLiveObject(HeapObject* object) {}
1610
1611
Steve Blocka7e24c12009-10-30 11:49:00 +00001612// Function template that, given a range of addresses (eg, a semispace or a
1613// paged space page), iterates through the objects in the range to clear
1614// mark bits and compute and encode forwarding addresses. As a side effect,
1615// maximal free chunks are marked so that they can be skipped on subsequent
1616// sweeps.
1617//
1618// The template parameters are an allocation function, a forwarding address
1619// encoding function, and a function to process non-live objects.
1620template<MarkCompactCollector::AllocationFunction Alloc,
1621 MarkCompactCollector::EncodingFunction Encode,
1622 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1623inline void EncodeForwardingAddressesInRange(Address start,
1624 Address end,
1625 int* offset) {
1626 // The start address of the current free region while sweeping the space.
1627 // This address is set when a transition from live to non-live objects is
1628 // encountered. A value (an encoding of the 'next free region' pointer)
1629 // is written to memory at this address when a transition from non-live to
1630 // live objects is encountered.
1631 Address free_start = NULL;
1632
1633 // A flag giving the state of the previously swept object. Initially true
1634 // to ensure that free_start is initialized to a proper address before
1635 // trying to write to it.
1636 bool is_prev_alive = true;
1637
1638 int object_size; // Will be set on each iteration of the loop.
1639 for (Address current = start; current < end; current += object_size) {
1640 HeapObject* object = HeapObject::FromAddress(current);
1641 if (object->IsMarked()) {
1642 object->ClearMark();
1643 MarkCompactCollector::tracer()->decrement_marked_count();
1644 object_size = object->Size();
1645
Steve Blocka7e24c12009-10-30 11:49:00 +00001646 // Allocation cannot fail, because we are compacting the space.
John Reck59135872010-11-02 12:39:01 -07001647 Object* forwarded = Alloc(object, object_size)->ToObjectUnchecked();
Steve Blocka7e24c12009-10-30 11:49:00 +00001648 Encode(object, object_size, forwarded, offset);
1649
1650#ifdef DEBUG
1651 if (FLAG_gc_verbose) {
1652 PrintF("forward %p -> %p.\n", object->address(),
1653 HeapObject::cast(forwarded)->address());
1654 }
1655#endif
1656 if (!is_prev_alive) { // Transition from non-live to live.
Steve Blockd0582a62009-12-15 09:54:21 +00001657 EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
Steve Blocka7e24c12009-10-30 11:49:00 +00001658 is_prev_alive = true;
1659 }
1660 } else { // Non-live object.
1661 object_size = object->Size();
1662 ProcessNonLive(object);
1663 if (is_prev_alive) { // Transition from live to non-live.
1664 free_start = current;
1665 is_prev_alive = false;
1666 }
Steve Block1e0659c2011-05-24 12:43:12 +01001667 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001668 }
1669 }
1670
1671 // If we ended on a free region, mark it.
Steve Blockd0582a62009-12-15 09:54:21 +00001672 if (!is_prev_alive) {
1673 EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
1674 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001675}
1676
1677
1678// Functions to encode the forwarding pointers in each compactable space.
1679void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
1680 int ignored;
1681 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
1682 EncodeForwardingAddressInNewSpace,
1683 IgnoreNonLiveObject>(
1684 Heap::new_space()->bottom(),
1685 Heap::new_space()->top(),
1686 &ignored);
1687}
1688
1689
1690template<MarkCompactCollector::AllocationFunction Alloc,
1691 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1692void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
1693 PagedSpace* space) {
1694 PageIterator it(space, PageIterator::PAGES_IN_USE);
1695 while (it.has_next()) {
1696 Page* p = it.next();
Steve Block6ded16b2010-05-10 14:33:55 +01001697
Steve Blocka7e24c12009-10-30 11:49:00 +00001698 // The offset of each live object in the page from the first live object
1699 // in the page.
1700 int offset = 0;
1701 EncodeForwardingAddressesInRange<Alloc,
1702 EncodeForwardingAddressInPagedSpace,
1703 ProcessNonLive>(
1704 p->ObjectAreaStart(),
1705 p->AllocationTop(),
1706 &offset);
1707 }
1708}
1709
1710
// We scavenge new space simultaneously with sweeping.  This is done in two
// passes.
//
// The first pass migrates all live objects from one semispace to the other
// or promotes them to old space.  The forwarding address is written directly
// into the first word of the object, without any encoding.  If the object is
// dead, NULL is written as its forwarding address.
//
// The second pass updates pointers to new space in all spaces.  It is
// possible to encounter pointers to dead objects while traversing dirty
// regions; such pointers are cleared so they are not seen again on the next
// iteration over dirty regions.
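//
// A minimal sketch (illustrative only) of the pass-two update implied above,
// for a slot p that refers into from space; UpdatePointerToNewGen below is
// the real implementation:
//
//   Address old_addr = (*p)->address();
//   Address new_addr = Memory::Address_at(old_addr);
//   if (new_addr == NULL) {
//     *p = NULL;                               // The object died; clear the slot.
//   } else {
//     *p = HeapObject::FromAddress(new_addr);  // Follow the forwarding address.
//   }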
1721static void MigrateObject(Address dst,
1722 Address src,
1723 int size,
1724 bool to_old_space) {
1725 if (to_old_space) {
1726 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
1727 } else {
1728 Heap::CopyBlock(dst, src, size);
1729 }
Steve Block6ded16b2010-05-10 14:33:55 +01001730
1731 Memory::Address_at(src) = dst;
1732}
1733
1734
Iain Merrick75681382010-08-19 15:07:18 +01001735class StaticPointersToNewGenUpdatingVisitor : public
1736 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1737 public:
1738 static inline void VisitPointer(Object** p) {
1739 if (!(*p)->IsHeapObject()) return;
1740
1741 HeapObject* obj = HeapObject::cast(*p);
1742 Address old_addr = obj->address();
1743
1744 if (Heap::new_space()->Contains(obj)) {
1745 ASSERT(Heap::InFromSpace(*p));
1746 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1747 }
1748 }
1749};
1750
1751
Steve Block6ded16b2010-05-10 14:33:55 +01001752// Visitor for updating pointers from live objects in old spaces to new space.
1753// It does not expect to encounter pointers to dead objects.
1754class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1755 public:
1756 void VisitPointer(Object** p) {
Iain Merrick75681382010-08-19 15:07:18 +01001757 StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
Steve Block6ded16b2010-05-10 14:33:55 +01001758 }
1759
1760 void VisitPointers(Object** start, Object** end) {
Iain Merrick75681382010-08-19 15:07:18 +01001761 for (Object** p = start; p < end; p++) {
1762 StaticPointersToNewGenUpdatingVisitor::VisitPointer(p);
1763 }
Steve Block6ded16b2010-05-10 14:33:55 +01001764 }
1765
1766 void VisitCodeTarget(RelocInfo* rinfo) {
1767 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1768 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1769 VisitPointer(&target);
1770 rinfo->set_target_address(Code::cast(target)->instruction_start());
1771 }
1772
1773 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001774 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1775 rinfo->IsPatchedReturnSequence()) ||
1776 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1777 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block6ded16b2010-05-10 14:33:55 +01001778 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1779 VisitPointer(&target);
1780 rinfo->set_call_address(Code::cast(target)->instruction_start());
1781 }
Steve Block6ded16b2010-05-10 14:33:55 +01001782};
1783
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001784
// Updates a pointer from a live object in an old space to new space.  Unlike
// the visitor above, this helper can encounter pointers to dead objects in
// new space when traversing map space (see the comment above MigrateObject).
1788static void UpdatePointerToNewGen(HeapObject** p) {
1789 if (!(*p)->IsHeapObject()) return;
1790
1791 Address old_addr = (*p)->address();
1792 ASSERT(Heap::InFromSpace(*p));
1793
1794 Address new_addr = Memory::Address_at(old_addr);
1795
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001796 if (new_addr == NULL) {
1797 // We encountered pointer to a dead object. Clear it so we will
1798 // not visit it again during next iteration of dirty regions.
1799 *p = NULL;
1800 } else {
1801 *p = HeapObject::FromAddress(new_addr);
1802 }
Steve Block6ded16b2010-05-10 14:33:55 +01001803}
1804
1805
1806static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
1807 Address old_addr = HeapObject::cast(*p)->address();
1808 Address new_addr = Memory::Address_at(old_addr);
1809 return String::cast(HeapObject::FromAddress(new_addr));
1810}
1811
1812
1813static bool TryPromoteObject(HeapObject* object, int object_size) {
1814 Object* result;
1815
1816 if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07001817 MaybeObject* maybe_result =
1818 Heap::lo_space()->AllocateRawFixedArray(object_size);
1819 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001820 HeapObject* target = HeapObject::cast(result);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001821 MigrateObject(target->address(), object->address(), object_size, true);
Leon Clarkef7060e22010-06-03 12:02:55 +01001822 MarkCompactCollector::tracer()->
1823 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001824 return true;
1825 }
1826 } else {
1827 OldSpace* target_space = Heap::TargetSpace(object);
1828
1829 ASSERT(target_space == Heap::old_pointer_space() ||
1830 target_space == Heap::old_data_space());
John Reck59135872010-11-02 12:39:01 -07001831 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
1832 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001833 HeapObject* target = HeapObject::cast(result);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001834 MigrateObject(target->address(),
1835 object->address(),
1836 object_size,
1837 target_space == Heap::old_pointer_space());
Leon Clarkef7060e22010-06-03 12:02:55 +01001838 MarkCompactCollector::tracer()->
1839 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001840 return true;
1841 }
1842 }
1843
1844 return false;
1845}
1846
1847
1848static void SweepNewSpace(NewSpace* space) {
1849 Heap::CheckNewSpaceExpansionCriteria();
1850
1851 Address from_bottom = space->bottom();
1852 Address from_top = space->top();
1853
1854 // Flip the semispaces. After flipping, to space is empty, from space has
1855 // live objects.
1856 space->Flip();
1857 space->ResetAllocationInfo();
1858
1859 int size = 0;
1860 int survivors_size = 0;
1861
1862 // First pass: traverse all objects in inactive semispace, remove marks,
1863 // migrate live objects and write forwarding addresses.
1864 for (Address current = from_bottom; current < from_top; current += size) {
1865 HeapObject* object = HeapObject::FromAddress(current);
1866
1867 if (object->IsMarked()) {
1868 object->ClearMark();
1869 MarkCompactCollector::tracer()->decrement_marked_count();
1870
1871 size = object->Size();
1872 survivors_size += size;
1873
1874 // Aggressively promote young survivors to the old space.
1875 if (TryPromoteObject(object, size)) {
1876 continue;
1877 }
1878
      // Promotion failed.  Just migrate the object to the other semispace.
Steve Block6ded16b2010-05-10 14:33:55 +01001880 // Allocation cannot fail at this point: semispaces are of equal size.
John Reck59135872010-11-02 12:39:01 -07001881 Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
Steve Block6ded16b2010-05-10 14:33:55 +01001882
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001883 MigrateObject(HeapObject::cast(target)->address(),
1884 current,
1885 size,
1886 false);
Steve Block6ded16b2010-05-10 14:33:55 +01001887 } else {
Steve Block1e0659c2011-05-24 12:43:12 +01001888 // Process the dead object before we write a NULL into its header.
1889 LiveObjectList::ProcessNonLive(object);
1890
Steve Block6ded16b2010-05-10 14:33:55 +01001891 size = object->Size();
1892 Memory::Address_at(current) = NULL;
1893 }
1894 }
1895
1896 // Second pass: find pointers to new space and update them.
1897 PointersToNewGenUpdatingVisitor updating_visitor;
1898
1899 // Update pointers in to space.
Iain Merrick75681382010-08-19 15:07:18 +01001900 Address current = space->bottom();
1901 while (current < space->top()) {
1902 HeapObject* object = HeapObject::FromAddress(current);
1903 current +=
1904 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
1905 object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001906 }
Steve Block6ded16b2010-05-10 14:33:55 +01001907
1908 // Update roots.
1909 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
Steve Block1e0659c2011-05-24 12:43:12 +01001910 LiveObjectList::IterateElements(&updating_visitor);
Steve Block6ded16b2010-05-10 14:33:55 +01001911
1912 // Update pointers in old spaces.
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001913 Heap::IterateDirtyRegions(Heap::old_pointer_space(),
1914 &Heap::IteratePointersInDirtyRegion,
1915 &UpdatePointerToNewGen,
1916 Heap::WATERMARK_SHOULD_BE_VALID);
1917
1918 Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
Steve Block6ded16b2010-05-10 14:33:55 +01001919
1920 // Update pointers from cells.
1921 HeapObjectIterator cell_iterator(Heap::cell_space());
1922 for (HeapObject* cell = cell_iterator.next();
1923 cell != NULL;
1924 cell = cell_iterator.next()) {
1925 if (cell->IsJSGlobalPropertyCell()) {
1926 Address value_address =
1927 reinterpret_cast<Address>(cell) +
1928 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1929 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1930 }
1931 }
1932
Ben Murdochf87a2032010-10-22 12:50:53 +01001933 // Update pointer from the global contexts list.
1934 updating_visitor.VisitPointer(Heap::global_contexts_list_address());
1935
Steve Block6ded16b2010-05-10 14:33:55 +01001936 // Update pointers from external string table.
1937 Heap::UpdateNewSpaceReferencesInExternalStringTable(
1938 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1939
1940 // All pointers were updated. Update auxiliary allocation info.
1941 Heap::IncrementYoungSurvivorsCounter(survivors_size);
1942 space->set_age_mark(space->top());
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001943
1944 // Update JSFunction pointers from the runtime profiler.
1945 RuntimeProfiler::UpdateSamplesAfterScavenge();
Steve Blocka7e24c12009-10-30 11:49:00 +00001946}
1947
1948
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001949static void SweepSpace(PagedSpace* space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 PageIterator it(space, PageIterator::PAGES_IN_USE);
Steve Block6ded16b2010-05-10 14:33:55 +01001951
  // During sweeping of a paged space we try to find the longest sequences
  // of pages without live objects and free them (instead of putting them on
  // the free list).
1955
1956 // Page preceding current.
1957 Page* prev = Page::FromAddress(NULL);
1958
1959 // First empty page in a sequence.
1960 Page* first_empty_page = Page::FromAddress(NULL);
1961
1962 // Page preceding first empty page.
1963 Page* prec_first_empty_page = Page::FromAddress(NULL);
1964
  // If the last used page of the space ends with a sequence of dead objects
  // we can adjust the allocation top instead of putting this free area on
  // the free list.  Thus during sweeping we keep track of such areas
  // and defer their deallocation until the sweeping of the next page
  // is done: if one of the next pages contains live objects we have
  // to put such an area on the free list.
1971 Address last_free_start = NULL;
1972 int last_free_size = 0;
1973
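  // Worked example (illustrative only, page names assumed): with pages
  // P1..P5 in allocation order, where P3 and P4 contain no live objects and
  // P5 is the last used page ending in dead objects, the loop below links
  // P3..P4 into an empty sequence and frees them via FreePages() when it
  // reaches the live page P5; P5's trailing dead area is not put on the free
  // list, the allocation top is simply moved back to its start afterwards.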
Steve Blocka7e24c12009-10-30 11:49:00 +00001974 while (it.has_next()) {
1975 Page* p = it.next();
1976
1977 bool is_previous_alive = true;
1978 Address free_start = NULL;
1979 HeapObject* object;
1980
1981 for (Address current = p->ObjectAreaStart();
1982 current < p->AllocationTop();
1983 current += object->Size()) {
1984 object = HeapObject::FromAddress(current);
1985 if (object->IsMarked()) {
1986 object->ClearMark();
1987 MarkCompactCollector::tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01001988
Steve Blocka7e24c12009-10-30 11:49:00 +00001989 if (!is_previous_alive) { // Transition from free to live.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001990 space->DeallocateBlock(free_start,
1991 static_cast<int>(current - free_start),
1992 true);
Steve Blocka7e24c12009-10-30 11:49:00 +00001993 is_previous_alive = true;
1994 }
1995 } else {
Leon Clarked91b9f72010-01-27 17:25:45 +00001996 MarkCompactCollector::ReportDeleteIfNeeded(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001997 if (is_previous_alive) { // Transition from live to free.
1998 free_start = current;
1999 is_previous_alive = false;
2000 }
Steve Block1e0659c2011-05-24 12:43:12 +01002001 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 }
2003 // The object is now unmarked for the call to Size() at the top of the
2004 // loop.
2005 }
2006
Steve Block6ded16b2010-05-10 14:33:55 +01002007 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2008 || (!is_previous_alive && free_start == p->ObjectAreaStart());
2009
2010 if (page_is_empty) {
2011 // This page is empty. Check whether we are in the middle of
      // This page is empty.  Check whether we are in the middle of a
      // sequence of empty pages and start one if not.
2014 first_empty_page = p;
2015 prec_first_empty_page = prev;
2016 }
2017
2018 if (!is_previous_alive) {
2019 // There are dead objects on this page. Update space accounting stats
2020 // without putting anything into free list.
2021 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2022 if (size_in_bytes > 0) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002023 space->DeallocateBlock(free_start, size_in_bytes, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002024 }
2025 }
2026 } else {
      // This page is not empty.  The sequence of empty pages ended on the
      // previous one.
2029 if (first_empty_page->is_valid()) {
2030 space->FreePages(prec_first_empty_page, prev);
2031 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2032 }
2033
      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
2036 if (last_free_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002037 Page::FromAddress(last_free_start)->
2038 SetAllocationWatermark(last_free_start);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002039 space->DeallocateBlock(last_free_start, last_free_size, true);
Steve Block6ded16b2010-05-10 14:33:55 +01002040 last_free_start = NULL;
2041 last_free_size = 0;
2042 }
2043
2044 // If the last region of this page was not live we remember it.
2045 if (!is_previous_alive) {
2046 ASSERT(last_free_size == 0);
2047 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2048 last_free_start = free_start;
Steve Blocka7e24c12009-10-30 11:49:00 +00002049 }
2050 }
Steve Block6ded16b2010-05-10 14:33:55 +01002051
2052 prev = p;
2053 }
2054
2055 // We reached end of space. See if we need to adjust allocation top.
2056 Address new_allocation_top = NULL;
2057
2058 if (first_empty_page->is_valid()) {
2059 // Last used pages in space are empty. We can move allocation top backwards
2060 // to the beginning of first empty page.
2061 ASSERT(prev == space->AllocationTopPage());
2062
2063 new_allocation_top = first_empty_page->ObjectAreaStart();
2064 }
2065
2066 if (last_free_size > 0) {
2067 // There was a free ending area on the previous page.
2068 // Deallocate it without putting it into freelist and move allocation
2069 // top to the beginning of this free area.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002070 space->DeallocateBlock(last_free_start, last_free_size, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002071 new_allocation_top = last_free_start;
2072 }
2073
2074 if (new_allocation_top != NULL) {
2075#ifdef DEBUG
2076 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2077 if (!first_empty_page->is_valid()) {
2078 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2079 } else if (last_free_size > 0) {
2080 ASSERT(new_allocation_top_page == prec_first_empty_page);
2081 } else {
2082 ASSERT(new_allocation_top_page == first_empty_page);
2083 }
2084#endif
2085
2086 space->SetTop(new_allocation_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002087 }
2088}
2089
2090
Steve Blocka7e24c12009-10-30 11:49:00 +00002091void MarkCompactCollector::EncodeForwardingAddresses() {
2092 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2093 // Objects in the active semispace of the young generation may be
2094 // relocated to the inactive semispace (if not promoted). Set the
2095 // relocation info to the beginning of the inactive semispace.
2096 Heap::new_space()->MCResetRelocationInfo();
2097
2098 // Compute the forwarding pointers in each space.
2099 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002100 ReportDeleteIfNeeded>(
Steve Blocka7e24c12009-10-30 11:49:00 +00002101 Heap::old_pointer_space());
2102
2103 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2104 IgnoreNonLiveObject>(
2105 Heap::old_data_space());
2106
2107 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002108 ReportDeleteIfNeeded>(
Steve Blocka7e24c12009-10-30 11:49:00 +00002109 Heap::code_space());
2110
2111 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2112 IgnoreNonLiveObject>(
2113 Heap::cell_space());
2114
2115
  // Compute forwarding addresses for new space second to last, after the old
  // and code spaces have been compacted.  Objects in new space can be
  // promoted to the old spaces.
2118 EncodeForwardingAddressesInNewSpace();
2119
2120 // Compute map space last because computing forwarding addresses
2121 // overwrites non-live objects. Objects in the other spaces rely on
2122 // non-live map pointers to get the sizes of non-live objects.
2123 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2124 IgnoreNonLiveObject>(
2125 Heap::map_space());
2126
2127 // Write relocation info to the top page, so we can use it later. This is
2128 // done after promoting objects from the new space so we get the correct
2129 // allocation top.
2130 Heap::old_pointer_space()->MCWriteRelocationInfoToPage();
2131 Heap::old_data_space()->MCWriteRelocationInfoToPage();
2132 Heap::code_space()->MCWriteRelocationInfoToPage();
2133 Heap::map_space()->MCWriteRelocationInfoToPage();
2134 Heap::cell_space()->MCWriteRelocationInfoToPage();
2135}
2136
2137
Leon Clarkee46be812010-01-19 14:06:41 +00002138class MapIterator : public HeapObjectIterator {
2139 public:
2140 MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
2141
2142 explicit MapIterator(Address start)
2143 : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
2144
2145 private:
2146 static int SizeCallback(HeapObject* unused) {
2147 USE(unused);
2148 return Map::kSize;
2149 }
2150};
2151
2152
2153class MapCompact {
2154 public:
2155 explicit MapCompact(int live_maps)
2156 : live_maps_(live_maps),
2157 to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
2158 map_to_evacuate_it_(to_evacuate_start_),
2159 first_map_to_evacuate_(
2160 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2161 }
2162
2163 void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
2166 for (Map* next_vacant_map = NextVacantMap();
2167 next_vacant_map;
2168 next_vacant_map = NextVacantMap()) {
2169 EvacuateMap(next_vacant_map, NextMapToEvacuate());
2170 }
2171
2172#ifdef DEBUG
2173 CheckNoMapsToEvacuate();
2174#endif
2175 }
2176
2177 void UpdateMapPointersInRoots() {
2178 Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
2179 GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
Steve Block1e0659c2011-05-24 12:43:12 +01002180 LiveObjectList::IterateElements(&map_updating_visitor_);
Leon Clarkee46be812010-01-19 14:06:41 +00002181 }
2182
Leon Clarkee46be812010-01-19 14:06:41 +00002183 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
2184 ASSERT(space != Heap::map_space());
2185
2186 PageIterator it(space, PageIterator::PAGES_IN_USE);
2187 while (it.has_next()) {
2188 Page* p = it.next();
2189 UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
2190 }
2191 }
2192
2193 void UpdateMapPointersInNewSpace() {
2194 NewSpace* space = Heap::new_space();
2195 UpdateMapPointersInRange(space->bottom(), space->top());
2196 }
2197
2198 void UpdateMapPointersInLargeObjectSpace() {
2199 LargeObjectIterator it(Heap::lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00002200 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
2201 UpdateMapPointersInObject(obj);
Leon Clarkee46be812010-01-19 14:06:41 +00002202 }
2203
2204 void Finish() {
2205 Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
2206 }
2207
2208 private:
2209 int live_maps_;
2210 Address to_evacuate_start_;
2211 MapIterator vacant_map_it_;
2212 MapIterator map_to_evacuate_it_;
2213 Map* first_map_to_evacuate_;
2214
2215 // Helper class for updating map pointers in HeapObjects.
2216 class MapUpdatingVisitor: public ObjectVisitor {
2217 public:
2218 void VisitPointer(Object** p) {
2219 UpdateMapPointer(p);
2220 }
2221
2222 void VisitPointers(Object** start, Object** end) {
2223 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2224 }
2225
2226 private:
2227 void UpdateMapPointer(Object** p) {
2228 if (!(*p)->IsHeapObject()) return;
2229 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2230
      // Moved maps are tagged with an overflowed map word.  They are the only
      // objects whose map word is overflowed, as marking is already complete.
2233 MapWord map_word = old_map->map_word();
2234 if (!map_word.IsOverflowed()) return;
2235
2236 *p = GetForwardedMap(map_word);
2237 }
2238 };
2239
2240 static MapUpdatingVisitor map_updating_visitor_;
2241
2242 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2243 while (true) {
Leon Clarkee46be812010-01-19 14:06:41 +00002244 HeapObject* next = it->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00002245 ASSERT(next != NULL);
Leon Clarkee46be812010-01-19 14:06:41 +00002246 if (next == last)
2247 return NULL;
2248 ASSERT(!next->IsOverflowed());
2249 ASSERT(!next->IsMarked());
2250 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2251 if (next->IsMap() == live)
2252 return reinterpret_cast<Map*>(next);
2253 }
2254 }
2255
2256 Map* NextVacantMap() {
2257 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2258 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2259 return map;
2260 }
2261
2262 Map* NextMapToEvacuate() {
2263 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2264 ASSERT(map != NULL);
2265 ASSERT(map->IsMap());
2266 return map;
2267 }
2268
2269 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2270 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2271 ASSERT(map_to_evacuate->IsMap());
2272
Steve Block6ded16b2010-05-10 14:33:55 +01002273 ASSERT(Map::kSize % 4 == 0);
2274
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002275 Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
2276 map_to_evacuate->address(),
2277 Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01002278
    ASSERT(vacant_map->IsMap());  // Due to the block copy above.
2280
2281 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2282 forwarding_map_word.SetOverflow();
2283 map_to_evacuate->set_map_word(forwarding_map_word);
2284
2285 ASSERT(map_to_evacuate->map_word().IsOverflowed());
2286 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2287 }
2288
2289 static Map* GetForwardedMap(MapWord map_word) {
2290 ASSERT(map_word.IsOverflowed());
2291 map_word.ClearOverflow();
2292 Map* new_map = map_word.ToMap();
2293 ASSERT_MAP_ALIGNED(new_map->address());
2294 return new_map;
2295 }
2296
2297 static int UpdateMapPointersInObject(HeapObject* obj) {
2298 ASSERT(!obj->IsMarked());
2299 Map* map = obj->map();
2300 ASSERT(Heap::map_space()->Contains(map));
2301 MapWord map_word = map->map_word();
2302 ASSERT(!map_word.IsMarked());
2303 if (map_word.IsOverflowed()) {
2304 Map* new_map = GetForwardedMap(map_word);
2305 ASSERT(Heap::map_space()->Contains(new_map));
2306 obj->set_map(new_map);
2307
2308#ifdef DEBUG
2309 if (FLAG_gc_verbose) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002310 PrintF("update %p : %p -> %p\n",
2311 obj->address(),
2312 reinterpret_cast<void*>(map),
2313 reinterpret_cast<void*>(new_map));
Leon Clarkee46be812010-01-19 14:06:41 +00002314 }
2315#endif
2316 }
2317
2318 int size = obj->SizeFromMap(map);
2319 obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
2320 return size;
2321 }
2322
2323 static void UpdateMapPointersInRange(Address start, Address end) {
2324 HeapObject* object;
2325 int size;
2326 for (Address current = start; current < end; current += size) {
2327 object = HeapObject::FromAddress(current);
2328 size = UpdateMapPointersInObject(object);
2329 ASSERT(size > 0);
2330 }
2331 }
2332
2333#ifdef DEBUG
2334 void CheckNoMapsToEvacuate() {
2335 if (!FLAG_enable_slow_asserts)
2336 return;
2337
Leon Clarked91b9f72010-01-27 17:25:45 +00002338 for (HeapObject* obj = map_to_evacuate_it_.next();
2339 obj != NULL; obj = map_to_evacuate_it_.next())
2340 ASSERT(FreeListNode::IsFreeListNode(obj));
Leon Clarkee46be812010-01-19 14:06:41 +00002341 }
2342#endif
2343};
2344
2345MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
2346
2347
Steve Blocka7e24c12009-10-30 11:49:00 +00002348void MarkCompactCollector::SweepSpaces() {
Leon Clarkef7060e22010-06-03 12:02:55 +01002349 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2350
Steve Blocka7e24c12009-10-30 11:49:00 +00002351 ASSERT(state_ == SWEEP_SPACES);
2352 ASSERT(!IsCompacting());
2353 // Noncompacting collections simply sweep the spaces to clear the mark
2354 // bits and free the nonlive blocks (for old and map spaces). We sweep
2355 // the map space last because freeing non-live maps overwrites them and
2356 // the other spaces rely on possibly non-live maps to get the sizes for
2357 // non-live objects.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002358 SweepSpace(Heap::old_pointer_space());
2359 SweepSpace(Heap::old_data_space());
2360 SweepSpace(Heap::code_space());
2361 SweepSpace(Heap::cell_space());
Iain Merrick75681382010-08-19 15:07:18 +01002362 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2363 SweepNewSpace(Heap::new_space());
2364 }
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002365 SweepSpace(Heap::map_space());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002366
2367 Heap::IterateDirtyRegions(Heap::map_space(),
2368 &Heap::IteratePointersInDirtyMapsRegion,
2369 &UpdatePointerToNewGen,
2370 Heap::WATERMARK_SHOULD_BE_VALID);
2371
Ben Murdochf87a2032010-10-22 12:50:53 +01002372 intptr_t live_maps_size = Heap::map_space()->Size();
2373 int live_maps = static_cast<int>(live_maps_size / Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01002374 ASSERT(live_map_objects_size_ == live_maps_size);
Leon Clarkee46be812010-01-19 14:06:41 +00002375
2376 if (Heap::map_space()->NeedsCompaction(live_maps)) {
2377 MapCompact map_compact(live_maps);
2378
2379 map_compact.CompactMaps();
2380 map_compact.UpdateMapPointersInRoots();
2381
Leon Clarkee46be812010-01-19 14:06:41 +00002382 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00002383 for (PagedSpace* space = spaces.next();
2384 space != NULL; space = spaces.next()) {
Leon Clarkee46be812010-01-19 14:06:41 +00002385 if (space == Heap::map_space()) continue;
2386 map_compact.UpdateMapPointersInPagedSpace(space);
2387 }
2388 map_compact.UpdateMapPointersInNewSpace();
2389 map_compact.UpdateMapPointersInLargeObjectSpace();
2390
2391 map_compact.Finish();
2392 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002393}
2394
2395
// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace), skipping free regions using the encodings written during the
// forwarding-address phase.  The callback function is used to get the size
// of each live object.  Returns the total size of the live objects found.
2401int MarkCompactCollector::IterateLiveObjectsInRange(
2402 Address start,
2403 Address end,
2404 HeapObjectCallback size_func) {
Steve Block6ded16b2010-05-10 14:33:55 +01002405 int live_objects_size = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002406 Address current = start;
2407 while (current < end) {
2408 uint32_t encoded_map = Memory::uint32_at(current);
2409 if (encoded_map == kSingleFreeEncoding) {
2410 current += kPointerSize;
2411 } else if (encoded_map == kMultiFreeEncoding) {
2412 current += Memory::int_at(current + kIntSize);
2413 } else {
Steve Block6ded16b2010-05-10 14:33:55 +01002414 int size = size_func(HeapObject::FromAddress(current));
2415 current += size;
2416 live_objects_size += size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002417 }
2418 }
Steve Block6ded16b2010-05-10 14:33:55 +01002419 return live_objects_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002420}
2421
2422
2423int MarkCompactCollector::IterateLiveObjects(NewSpace* space,
2424 HeapObjectCallback size_f) {
2425 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2426 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2427}
2428
2429
2430int MarkCompactCollector::IterateLiveObjects(PagedSpace* space,
2431 HeapObjectCallback size_f) {
2432 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2433 int total = 0;
2434 PageIterator it(space, PageIterator::PAGES_IN_USE);
2435 while (it.has_next()) {
2436 Page* p = it.next();
2437 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2438 p->AllocationTop(),
2439 size_f);
2440 }
2441 return total;
2442}
2443
2444
2445// -------------------------------------------------------------------------
2446// Phase 3: Update pointers
2447
2448// Helper class for updating pointers in HeapObjects.
2449class UpdatingVisitor: public ObjectVisitor {
2450 public:
2451 void VisitPointer(Object** p) {
2452 UpdatePointer(p);
2453 }
2454
2455 void VisitPointers(Object** start, Object** end) {
2456 // Mark all HeapObject pointers in [start, end)
2457 for (Object** p = start; p < end; p++) UpdatePointer(p);
2458 }
2459
2460 void VisitCodeTarget(RelocInfo* rinfo) {
2461 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2462 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2463 VisitPointer(&target);
2464 rinfo->set_target_address(
2465 reinterpret_cast<Code*>(target)->instruction_start());
2466 }
2467
Steve Block3ce2e202009-11-05 08:53:23 +00002468 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002469 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2470 rinfo->IsPatchedReturnSequence()) ||
2471 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2472 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block3ce2e202009-11-05 08:53:23 +00002473 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2474 VisitPointer(&target);
2475 rinfo->set_call_address(
2476 reinterpret_cast<Code*>(target)->instruction_start());
2477 }
2478
Steve Blocka7e24c12009-10-30 11:49:00 +00002479 private:
2480 void UpdatePointer(Object** p) {
2481 if (!(*p)->IsHeapObject()) return;
2482
2483 HeapObject* obj = HeapObject::cast(*p);
2484 Address old_addr = obj->address();
2485 Address new_addr;
2486 ASSERT(!Heap::InFromSpace(obj));
2487
2488 if (Heap::new_space()->Contains(obj)) {
2489 Address forwarding_pointer_addr =
2490 Heap::new_space()->FromSpaceLow() +
2491 Heap::new_space()->ToSpaceOffsetForAddress(old_addr);
2492 new_addr = Memory::Address_at(forwarding_pointer_addr);
2493
2494#ifdef DEBUG
2495 ASSERT(Heap::old_pointer_space()->Contains(new_addr) ||
2496 Heap::old_data_space()->Contains(new_addr) ||
2497 Heap::new_space()->FromSpaceContains(new_addr) ||
2498 Heap::lo_space()->Contains(HeapObject::FromAddress(new_addr)));
2499
2500 if (Heap::new_space()->FromSpaceContains(new_addr)) {
2501 ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
2502 Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
2503 }
2504#endif
2505
2506 } else if (Heap::lo_space()->Contains(obj)) {
2507 // Don't move objects in the large object space.
2508 return;
2509
2510 } else {
2511#ifdef DEBUG
2512 PagedSpaces spaces;
2513 PagedSpace* original_space = spaces.next();
2514 while (original_space != NULL) {
2515 if (original_space->Contains(obj)) break;
2516 original_space = spaces.next();
2517 }
2518 ASSERT(original_space != NULL);
2519#endif
2520 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2521 ASSERT(original_space->Contains(new_addr));
2522 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2523 original_space->MCSpaceOffsetForAddress(old_addr));
2524 }
2525
2526 *p = HeapObject::FromAddress(new_addr);
2527
2528#ifdef DEBUG
2529 if (FLAG_gc_verbose) {
2530 PrintF("update %p : %p -> %p\n",
2531 reinterpret_cast<Address>(p), old_addr, new_addr);
2532 }
2533#endif
2534 }
2535};
2536
2537
2538void MarkCompactCollector::UpdatePointers() {
2539#ifdef DEBUG
2540 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2541 state_ = UPDATE_POINTERS;
2542#endif
2543 UpdatingVisitor updating_visitor;
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002544 RuntimeProfiler::UpdateSamplesAfterCompact(&updating_visitor);
Steve Blockd0582a62009-12-15 09:54:21 +00002545 Heap::IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00002546 GlobalHandles::IterateWeakRoots(&updating_visitor);
2547
Ben Murdochf87a2032010-10-22 12:50:53 +01002548 // Update the pointer to the head of the weak list of global contexts.
2549 updating_visitor.VisitPointer(&Heap::global_contexts_list_);
2550
Steve Block1e0659c2011-05-24 12:43:12 +01002551 LiveObjectList::IterateElements(&updating_visitor);
2552
Steve Block6ded16b2010-05-10 14:33:55 +01002553 int live_maps_size = IterateLiveObjects(Heap::map_space(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002554 &UpdatePointersInOldObject);
Steve Block6ded16b2010-05-10 14:33:55 +01002555 int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2556 &UpdatePointersInOldObject);
2557 int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2558 &UpdatePointersInOldObject);
2559 int live_codes_size = IterateLiveObjects(Heap::code_space(),
2560 &UpdatePointersInOldObject);
2561 int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2562 &UpdatePointersInOldObject);
2563 int live_news_size = IterateLiveObjects(Heap::new_space(),
2564 &UpdatePointersInNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002565
2566 // Large objects do not move, the map word can be updated directly.
2567 LargeObjectIterator it(Heap::lo_space());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002568 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002569 UpdatePointersInNewObject(obj);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002570 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002571
Steve Block6ded16b2010-05-10 14:33:55 +01002572 USE(live_maps_size);
2573 USE(live_pointer_olds_size);
2574 USE(live_data_olds_size);
2575 USE(live_codes_size);
2576 USE(live_cells_size);
2577 USE(live_news_size);
2578 ASSERT(live_maps_size == live_map_objects_size_);
2579 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2580 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2581 ASSERT(live_codes_size == live_code_objects_size_);
2582 ASSERT(live_cells_size == live_cell_objects_size_);
2583 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002584}
2585
2586
2587int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
2588 // Keep old map pointers
2589 Map* old_map = obj->map();
2590 ASSERT(old_map->IsHeapObject());
2591
2592 Address forwarded = GetForwardingAddressInOldSpace(old_map);
2593
2594 ASSERT(Heap::map_space()->Contains(old_map));
2595 ASSERT(Heap::map_space()->Contains(forwarded));
2596#ifdef DEBUG
2597 if (FLAG_gc_verbose) {
2598 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
2599 forwarded);
2600 }
2601#endif
2602 // Update the map pointer.
2603 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
2604
2605 // We have to compute the object size relying on the old map because
2606 // map objects are not relocated yet.
2607 int obj_size = obj->SizeFromMap(old_map);
2608
2609 // Update pointers in the object body.
2610 UpdatingVisitor updating_visitor;
2611 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
2612 return obj_size;
2613}
2614
2615
2616int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
2617 // Decode the map pointer.
2618 MapWord encoding = obj->map_word();
2619 Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
2620 ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));
2621
  // At this point, the first word at map_addr is also encoded, so we cannot
  // cast it to Map* using Map::cast.
2624 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
2625 int obj_size = obj->SizeFromMap(map);
2626 InstanceType type = map->instance_type();
2627
2628 // Update map pointer.
2629 Address new_map_addr = GetForwardingAddressInOldSpace(map);
2630 int offset = encoding.DecodeOffset();
2631 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
2632
2633#ifdef DEBUG
2634 if (FLAG_gc_verbose) {
2635 PrintF("update %p : %p -> %p\n", obj->address(),
2636 map_addr, new_map_addr);
2637 }
2638#endif
2639
2640 // Update pointers in the object body.
2641 UpdatingVisitor updating_visitor;
2642 obj->IterateBody(type, obj_size, &updating_visitor);
2643 return obj_size;
2644}
2645
2646
2647Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in either old space or map space.
2649 MapWord encoding = obj->map_word();
2650
2651 // Offset to the first live object's forwarding address.
2652 int offset = encoding.DecodeOffset();
2653 Address obj_addr = obj->address();
2654
2655 // Find the first live object's forwarding address.
2656 Page* p = Page::FromAddress(obj_addr);
2657 Address first_forwarded = p->mc_first_forwarded;
2658
2659 // Page start address of forwarded address.
2660 Page* forwarded_page = Page::FromAddress(first_forwarded);
2661 int forwarded_offset = forwarded_page->Offset(first_forwarded);
2662
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002663 // Find end of allocation in the page of first_forwarded.
2664 int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
Steve Blocka7e24c12009-10-30 11:49:00 +00002665
2666 // Check if current object's forward pointer is in the same page
2667 // as the first live object's forwarding pointer
2668 if (forwarded_offset + offset < mc_top_offset) {
2669 // In the same page.
2670 return first_forwarded + offset;
2671 }
2672
2673 // Must be in the next page, NOTE: this may cross chunks.
2674 Page* next_page = forwarded_page->next_page();
2675 ASSERT(next_page->is_valid());
2676
2677 offset -= (mc_top_offset - forwarded_offset);
2678 offset += Page::kObjectStartOffset;
2679
2680 ASSERT_PAGE_OFFSET(offset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002681 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
Steve Blocka7e24c12009-10-30 11:49:00 +00002682
2683 return next_page->OffsetToAddress(offset);
2684}
2685
2686
2687// -------------------------------------------------------------------------
2688// Phase 4: Relocate objects
2689
2690void MarkCompactCollector::RelocateObjects() {
2691#ifdef DEBUG
2692 ASSERT(state_ == UPDATE_POINTERS);
2693 state_ = RELOCATE_OBJECTS;
2694#endif
2695 // Relocates objects, always relocate map objects first. Relocating
2696 // objects in other space relies on map objects to get object size.
Steve Block6ded16b2010-05-10 14:33:55 +01002697 int live_maps_size = IterateLiveObjects(Heap::map_space(),
2698 &RelocateMapObject);
2699 int live_pointer_olds_size = IterateLiveObjects(Heap::old_pointer_space(),
2700 &RelocateOldPointerObject);
2701 int live_data_olds_size = IterateLiveObjects(Heap::old_data_space(),
2702 &RelocateOldDataObject);
2703 int live_codes_size = IterateLiveObjects(Heap::code_space(),
2704 &RelocateCodeObject);
2705 int live_cells_size = IterateLiveObjects(Heap::cell_space(),
2706 &RelocateCellObject);
2707 int live_news_size = IterateLiveObjects(Heap::new_space(),
2708 &RelocateNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002709
Steve Block6ded16b2010-05-10 14:33:55 +01002710 USE(live_maps_size);
2711 USE(live_pointer_olds_size);
2712 USE(live_data_olds_size);
2713 USE(live_codes_size);
2714 USE(live_cells_size);
2715 USE(live_news_size);
2716 ASSERT(live_maps_size == live_map_objects_size_);
2717 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2718 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2719 ASSERT(live_codes_size == live_code_objects_size_);
2720 ASSERT(live_cells_size == live_cell_objects_size_);
2721 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00002722
2723 // Flip from and to spaces
2724 Heap::new_space()->Flip();
2725
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002726 Heap::new_space()->MCCommitRelocationInfo();
2727
Steve Blocka7e24c12009-10-30 11:49:00 +00002728 // Set age_mark to bottom in to space
2729 Address mark = Heap::new_space()->bottom();
2730 Heap::new_space()->set_age_mark(mark);
2731
Steve Blocka7e24c12009-10-30 11:49:00 +00002732 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00002733 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
2734 space->MCCommitRelocationInfo();
Steve Block6ded16b2010-05-10 14:33:55 +01002735
2736 Heap::CheckNewSpaceExpansionCriteria();
2737 Heap::IncrementYoungSurvivorsCounter(live_news_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00002738}
2739
2740
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer.  The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


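// Restores the map pointer of a relocated non-map object and returns the
// object's size in bytes.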
static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


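// Relocates an object from old pointer, old data, or cell space to its
// forwarding address.  Old data space cannot contain pointers into new
// space, so its blocks are moved without updating region (dirty) marks.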
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == Heap::old_data_space()) {
      Heap::MoveBlock(new_addr, old_addr, obj_size);
    } else {
      Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(SFIMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, Heap::cell_space());
}


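// Relocates a code object.  After the raw bytes are moved, Code::Relocate
// fixes up the relocation information (and possibly inline cache targets)
// for the new address.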
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(Heap::map_space());
  ASSERT(Heap::map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, Heap::code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    Heap::MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


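// Relocates an object that was live in new space.  The forwarding address
// was recorded in from space at the object's to-space offset; the target is
// either the other semispace or an old space when the object is promoted.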
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get forwarding address.
  Address old_addr = obj->address();
  int offset = Heap::new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(Heap::new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (Heap::new_space()->FromSpaceContains(new_addr)) {
    ASSERT(Heap::new_space()->FromSpaceOffsetForAddress(new_addr) <=
           Heap::new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(Heap::TargetSpace(obj) == Heap::old_pointer_space() ||
           Heap::TargetSpace(obj) == Heap::old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
    Heap::CopyBlock(new_addr, old_addr, obj_size);
  } else {
    Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                  old_addr,
                                                  obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(SFIMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


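// Notifies the GDB JIT interface and the profiler when a code object is
// about to be collected, so external views of generated code stay in sync.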
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(CodeDeleteEvent(obj->address()));
  }
#endif
}


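// Returns the size of a still-marked object.  The mark bit is kept in the
// map word, so it has to be cleared before the map can be used to compute
// the size.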
int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


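// One-time setup of the static visitor dispatch tables used for marking and
// for updating pointers to new space.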
void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal