// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      force_compaction_(false),
      compacting_collection_(false),
      compact_on_next_gc_(false),
      previous_marked_count_(0),
      tracer_(NULL),
#ifdef DEBUG
      live_young_objects_size_(0),
      live_old_pointer_objects_size_(0),
      live_old_data_objects_size_(0),
      live_code_objects_size_(0),
      live_map_objects_size_(0),
      live_cell_objects_size_(0),
      live_lo_objects_size_(0),
      live_bytes_(0),
#endif
      heap_(NULL),
      code_flusher_(NULL) { }


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called.  The individual steps below
  // will update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    heap_->MarkMapPointersAsEncoded(true);
    UpdatePointers();
    heap_->MarkMapPointersAsEncoded(false);
    heap_->isolate()->pc_to_code_cache()->Flush();

    RelocateObjects();
  } else {
    SweepSpaces();
    heap_->isolate()->pc_to_code_cache()->Flush();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a member variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!HEAP->map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If the GDBJIT interface is active, disable compaction.
    compacting_collection_ = false;
  }
#endif

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it.  This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  Isolate::Current()->stub_cache()->Clear();

  heap_->external_string_table_.CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit.  Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (i.e., we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
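  // For example: with 100 MB of old generation used and 20 MB recoverable,
  // fragmentation is 20%, which exceeds both thresholds, so the next GC
  // will be a compacting collection.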
  intptr_t old_gen_recoverable = 0;
  intptr_t old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection.  Before marking, all objects are in their normal state.  After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots.  It
// uses an explicit stack of pointers rather than recursion.  The young
// generation's inactive ('from') space is used as a marking stack.  The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal.  In that case, we set an
// overflow flag.  When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack.  Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking.  This process repeats until all reachable
// objects have been marked.

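// A minimal sketch of the driver loop this describes, as it appears in
// MarkRoots() below:
//
//   HEAP->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);  // May overflow.
//   while (marking_stack_.overflowed()) {
//     RefillMarkingStack();   // Rescan the heap for overflowed objects.
//     EmptyMarkingStack();    // Drain the stack, marking children.
//   }
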
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->unchecked_code() ==
           function->unchecked_shared()->unchecked_code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

 private:
  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->unchecked_shared();

      Code* code = shared->unchecked_code();
      if (!code->IsMarked()) {
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
      } else {
        candidate->set_code(shared->unchecked_code());
      }

      candidate = next_candidate;
    }

    jsfunction_candidates_head_ = NULL;
  }


  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->unchecked_code();
      if (!code->IsMarked()) {
        candidate->set_code(lazy_compile);
      }

      candidate = next_candidate;
    }

    shared_function_info_candidates_head_ = NULL;
  }

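  // The candidate lists are threaded through the candidates themselves, so
  // they need no extra storage: for a JSFunction the next-candidate pointer
  // temporarily overwrites the code-entry slot, which is restored by
  // ProcessJSFunctionCandidates() above.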
  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

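  // For a SharedFunctionInfo the next-candidate pointer lives in the header
  // padding of its code object; the assert below checks that a pointer fits
  // in that padding.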
  STATIC_ASSERT(kPointerSize <= Code::kHeaderSize - Code::kHeaderPaddingStart);

  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->unchecked_code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kHeaderPaddingStart);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
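  //
  // For example, a cons string produced by concatenating "foo" with the
  // empty string is replaced in place by its left child, the flat "foo".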
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map_word.ToMap()->heap();
  if (second != heap->raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks.  Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);


    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitGlobalContext,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Context::MarkCompactBodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
  }

  static inline void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_ics_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in HEAP->non_monomorphic_cache().
    } else {
      HEAP->mark_compact_collector()->MarkObject(code);
    }
  }

  static void VisitGlobalPropertyCell(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
    Object* cell = rinfo->target_cell();
    Object* old_cell = cell;
    VisitPointer(HEAP, &cell);
    if (cell != old_cell) {
      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
    }
  }

  static inline void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    HEAP->mark_compact_collector()->MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    heap->mark_compact_collector()->MarkObject(object);
  }


  // Visit an unmarked object.
  static inline void VisitUnmarkedObject(HeapObject* obj) {
#ifdef DEBUG
    ASSERT(HEAP->Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
    collector->SetMark(obj);
    // Mark the map pointer and the body.
    collector->MarkObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
        map->heap());
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;
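  // Each full GC in which a function remains otherwise flushable increments
  // its code age (see IsFlushable below); once the age reaches the threshold
  // the unoptimized code is replaced by the lazy-compile stub.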

  inline static bool HasSourceCode(SharedFunctionInfo* info) {
    Object* undefined = HEAP->raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return function->unchecked_code() !=
        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->unchecked_code() !=
        Isolate::Current()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsFlushable(JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on the stack, in the compilation cache, or referenced
    // by an optimized version of the function.
    if (function->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->unchecked_code()) {
      return false;
    }

    return IsFlushable(shared_info);
  }

  inline static bool IsFlushable(SharedFunctionInfo* shared_info) {
    // Code is either on the stack, in the compilation cache, or referenced
    // by an optimized version of the function.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // The function must be compiled and have the source code available to be
    // able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(shared_info))) {
      return false;
    }

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return false;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return false;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return false;

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return false;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return false;
    }

    return true;
  }


  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(function)) return false;

    // This function's code looks flushable.  But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
    return true;
  }


  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }


  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }


  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    Heap* heap = map->heap();
    if (!(map == heap->raw_unchecked_context_map() ||
          map == heap->raw_unchecked_catch_context_map() ||
          map == heap->raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }


  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    FixedBodyVisitor<StaticMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }


  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                  HeapObject* object) {
    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitSharedFunctionInfoGeneric(map, object);
      return;
    }
    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
  }


  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
      Map* map, HeapObject* object, bool known_flush_code_candidate) {
    Heap* heap = map->heap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    if (!known_flush_code_candidate) {
      known_flush_code_candidate = IsFlushable(shared);
      if (known_flush_code_candidate) {
        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
      }
    }

    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
  }


  static void VisitCodeEntry(Heap* heap, Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(heap, &code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }


  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSFunction(map, object);
      return;
    }

    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    bool flush_code_candidate = false;
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
    }

    if (!flush_code_candidate) {
      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());

      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
        // For optimized functions we should retain both the non-optimized
        // version of its code and the non-optimized versions of all inlined
        // functions.  This is required to support bailing out from inlined
        // code.
        DeoptimizationInputData* data =
            reinterpret_cast<DeoptimizationInputData*>(
                jsfunction->unchecked_code()->unchecked_deoptimization_data());

        FixedArray* literals = data->UncheckedLiteralArray();

        for (int i = 0, count = data->InlinedFunctionCount()->value();
             i < count;
             i++) {
          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
          collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
        }
      }
    }

    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          flush_code_candidate);
  }


  static void VisitJSFunction(Map* map, HeapObject* object) {
    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          false);
  }


#define SLOT_ADDR(obj, offset) \
  reinterpret_cast<Object**>((obj)->address() + offset)


  static inline void VisitJSFunctionFields(Map* map,
                                           JSFunction* object,
                                           bool flush_code_candidate) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();

    VisitPointers(heap,
                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    if (!flush_code_candidate) {
      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    } else {
      // Don't visit code object.

      // Visit shared function info to avoid double checking of its
      // flushability.
      SharedFunctionInfo* shared_info = object->unchecked_shared();
      if (!shared_info->IsMarked()) {
        Map* shared_info_map = shared_info->map();
        collector->SetMark(shared_info);
        collector->MarkObject(shared_info_map);
        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                   shared_info,
                                                   true);
      }
    }

    VisitPointers(heap,
                  SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));

    // Don't visit the next function list field as it is a weak reference.
  }


  static void VisitSharedFunctionInfoFields(Heap* heap,
                                            HeapObject* object,
                                            bool flush_code_candidate) {
    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));

    if (!flush_code_candidate) {
      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
    }

    VisitPointers(heap,
                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
  }

  #undef SLOT_ADDR

  typedef void (*Callback)(Map* map, HeapObject* object);

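  // Dispatch table mapping each static visitor id (derived from an object's
  // map) to the callback that visits objects of that kind.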
  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(heap_, start, end);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitCodeTarget(rinfo);
  }

  void VisitGlobalPropertyCell(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitGlobalPropertyCell(rinfo);
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    StaticMarkingVisitor::VisitDebugTarget(rinfo);
  }

 private:
  Heap* heap_;
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(ThreadLocalTop* top) {
    for (StackFrameIterator it(top); !it.done(); it.Advance()) {
      collector_->MarkObject(it.frame()->unchecked_code());
    }
  }

 private:
  MarkCompactCollector* collector_;
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      collector_->MarkObject(shared->unchecked_code());
      collector_->MarkObject(shared);
    }
  }

 private:
  MarkCompactCollector* collector_;
};


void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap_ == Isolate::Current()->heap());

  if (!FLAG_flush_code) {
    EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap_->isolate()->debug()->IsLoaded() ||
      heap_->isolate()->debug()->has_break_points()) {
    EnableCodeFlushing(false);
    return;
  }
#endif
  EnableCodeFlushing(true);

  // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  MarkObject(heap_->raw_unchecked_empty_descriptor_array());

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap_->mark_compact_collector());
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap_->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap_->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap_->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    collector_->SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    collector_->MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingStack();
  }

  MarkCompactCollector* collector_;
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  SymbolTableCleaner() : pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol.  We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          HEAP->FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = HEAP->raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }
 private:
  int pointers_removed_;
};


// Implementation of WeakObjectRetainer for mark compact GCs.  All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    MapWord first_word = HeapObject::cast(object)->map_word();
    if (first_word.IsMarked()) {
      return object;
    } else {
      return NULL;
    }
  }
};


void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(HEAP->Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_caches_in_maps_at_gc) {
      map->ClearCodeCache(heap_);
    }
    SetMark(map);
    if (FLAG_collect_maps &&
        map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
        map->instance_type() <= JS_FUNCTION_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack_.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack_.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  MarkDescriptorArray(reinterpret_cast<DescriptorArray*>(
      *HeapObject::RawField(map, Map::kInstanceDescriptorsOffset)));

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs.  If the details say that the
  // type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
  // live.  Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
  // CONSTANT_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack_.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack_.Push(descriptors);
}


1127void MarkCompactCollector::CreateBackPointers() {
Steve Block44f0eee2011-05-26 01:26:41 +01001128 HeapObjectIterator iterator(HEAP->map_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00001129 for (HeapObject* next_object = iterator.next();
1130 next_object != NULL; next_object = iterator.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001131 if (next_object->IsMap()) { // Could also be ByteArray on free list.
1132 Map* map = Map::cast(next_object);
1133 if (map->instance_type() >= FIRST_JS_OBJECT_TYPE &&
1134 map->instance_type() <= JS_FUNCTION_TYPE) {
1135 map->CreateBackPointers();
1136 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001137 ASSERT(map->instance_descriptors() == HEAP->empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001138 }
1139 }
1140 }
1141}
1142
1143
1144static int OverflowObjectSize(HeapObject* obj) {
1145 // Recover the normal map pointer, it might be marked as live and
1146 // overflowed.
1147 MapWord map_word = obj->map_word();
1148 map_word.ClearMark();
1149 map_word.ClearOverflow();
1150 return obj->SizeFromMap(map_word.ToMap());
1151}
1152
1153
Steve Block44f0eee2011-05-26 01:26:41 +01001154class OverflowedObjectsScanner : public AllStatic {
1155 public:
1156 // Fill the marking stack with overflowed objects returned by the given
1157 // iterator. Stop when the marking stack is filled or the end of the space
1158 // is reached, whichever comes first.
1159 template<class T>
1160 static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1161 T* it) {
1162 // The caller should ensure that the marking stack is initially not full,
1163 // so that we don't waste effort pointlessly scanning for objects.
1164 ASSERT(!collector->marking_stack_.is_full());
Steve Blocka7e24c12009-10-30 11:49:00 +00001165
Steve Block44f0eee2011-05-26 01:26:41 +01001166 for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
1167 if (object->IsOverflowed()) {
1168 object->ClearOverflow();
1169 ASSERT(object->IsMarked());
1170 ASSERT(HEAP->Contains(object));
1171 collector->marking_stack_.Push(object);
1172 if (collector->marking_stack_.is_full()) return;
1173 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001174 }
1175 }
Steve Block44f0eee2011-05-26 01:26:41 +01001176};
Steve Blocka7e24c12009-10-30 11:49:00 +00001177
1178
1179bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1180 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
1181}
1182
1183
Steve Blocka7e24c12009-10-30 11:49:00 +00001184void MarkCompactCollector::MarkSymbolTable() {
Steve Block44f0eee2011-05-26 01:26:41 +01001185 SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
Steve Blocka7e24c12009-10-30 11:49:00 +00001186 // Mark the symbol table itself.
1187 SetMark(symbol_table);
1188 // Explicitly mark the prefix.
Steve Block44f0eee2011-05-26 01:26:41 +01001189 MarkingVisitor marker(heap_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001190 symbol_table->IteratePrefix(&marker);
Iain Merrick75681382010-08-19 15:07:18 +01001191 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001192}
1193
1194
1195void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1196 // Mark the heap roots including global variables, stack variables,
1197 // etc., and all objects reachable from them.
Steve Block44f0eee2011-05-26 01:26:41 +01001198 HEAP->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00001199
1200 // Handle the symbol table specially.
1201 MarkSymbolTable();
1202
1203 // There may be overflowed objects in the heap. Visit them now.
Steve Block44f0eee2011-05-26 01:26:41 +01001204 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001205 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001206 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001207 }
1208}
1209
1210
1211void MarkCompactCollector::MarkObjectGroups() {
Steve Block44f0eee2011-05-26 01:26:41 +01001212 List<ObjectGroup*>* object_groups =
1213 heap_->isolate()->global_handles()->object_groups();
Steve Blocka7e24c12009-10-30 11:49:00 +00001214
1215 for (int i = 0; i < object_groups->length(); i++) {
1216 ObjectGroup* entry = object_groups->at(i);
1217 if (entry == NULL) continue;
1218
1219 List<Object**>& objects = entry->objects_;
1220 bool group_marked = false;
1221 for (int j = 0; j < objects.length(); j++) {
1222 Object* object = *objects[j];
1223 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1224 group_marked = true;
1225 break;
1226 }
1227 }
1228
1229 if (!group_marked) continue;
1230
1231 // An object in the group is marked, so mark as gray all white heap
1232 // objects in the group.
1233 for (int j = 0; j < objects.length(); ++j) {
1234 if ((*objects[j])->IsHeapObject()) {
1235 MarkObject(HeapObject::cast(*objects[j]));
1236 }
1237 }
Steve Block44f0eee2011-05-26 01:26:41 +01001238
Steve Blocka7e24c12009-10-30 11:49:00 +00001239 // Once the entire group has been colored gray, set the object group
1240 // to NULL so it won't be processed again.
Steve Block44f0eee2011-05-26 01:26:41 +01001241 delete entry;
Steve Blocka7e24c12009-10-30 11:49:00 +00001242 object_groups->at(i) = NULL;
1243 }
1244}
1245
1246
Steve Block44f0eee2011-05-26 01:26:41 +01001247void MarkCompactCollector::MarkImplicitRefGroups() {
1248 List<ImplicitRefGroup*>* ref_groups =
1249 heap_->isolate()->global_handles()->implicit_ref_groups();
1250
1251 for (int i = 0; i < ref_groups->length(); i++) {
1252 ImplicitRefGroup* entry = ref_groups->at(i);
1253 if (entry == NULL) continue;
1254
1255 if (!entry->parent_->IsMarked()) continue;
1256
1257 List<Object**>& children = entry->children_;
1258 // A parent object is marked, so mark as gray all child white heap
1259 // objects.
1260 for (int j = 0; j < children.length(); ++j) {
1261 if ((*children[j])->IsHeapObject()) {
1262 MarkObject(HeapObject::cast(*children[j]));
1263 }
1264 }
1265
1266 // Once the entire group has been colored gray, set the group
1267 // to NULL so it won't be processed again.
1268 delete entry;
1269 ref_groups->at(i) = NULL;
1270 }
1271}
1272
1273
Steve Blocka7e24c12009-10-30 11:49:00 +00001274// Mark all objects reachable from the objects on the marking stack.
1275// Before: the marking stack contains zero or more heap object pointers.
1276// After: the marking stack is empty, and all objects reachable from the
1277// marking stack have been marked, or are overflowed in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001278void MarkCompactCollector::EmptyMarkingStack() {
Steve Block44f0eee2011-05-26 01:26:41 +01001279 while (!marking_stack_.is_empty()) {
1280 HeapObject* object = marking_stack_.Pop();
Steve Blocka7e24c12009-10-30 11:49:00 +00001281 ASSERT(object->IsHeapObject());
Steve Block44f0eee2011-05-26 01:26:41 +01001282 ASSERT(heap_->Contains(object));
Steve Blocka7e24c12009-10-30 11:49:00 +00001283 ASSERT(object->IsMarked());
1284 ASSERT(!object->IsOverflowed());
1285
1286 // Because the object is marked, we have to recover the original map
1287 // pointer and use it to mark the object's body.
1288 MapWord map_word = object->map_word();
1289 map_word.ClearMark();
1290 Map* map = map_word.ToMap();
1291 MarkObject(map);
Iain Merrick75681382010-08-19 15:07:18 +01001292
1293 StaticMarkingVisitor::IterateBody(map, object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001294 }
1295}
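
// Note on the map word recovery above: while an object is marked, the mark
// bit overlays its map pointer, so a sketch of the sequence is
//
//   MapWord w = object->map_word();  // mark bit still set
//   w.ClearMark();                   // restore the real pointer bits
//   Map* map = w.ToMap();            // now safe to use for body iteration
//
// Reading object->map() directly here would yield a corrupted pointer.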
1296
1297
1298// Sweep the heap for overflowed objects, clear their overflow bits, and
1299// push them on the marking stack. Stop early if the marking stack fills
1300// before sweeping completes. If sweeping completes, there are no remaining
1301// overflowed objects in the heap so the overflow flag on the marking stack
1302// is cleared.
1303void MarkCompactCollector::RefillMarkingStack() {
Steve Block44f0eee2011-05-26 01:26:41 +01001304 ASSERT(marking_stack_.overflowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00001305
Steve Block44f0eee2011-05-26 01:26:41 +01001306 SemiSpaceIterator new_it(HEAP->new_space(), &OverflowObjectSize);
1307 OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
1308 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001309
Steve Block44f0eee2011-05-26 01:26:41 +01001310 HeapObjectIterator old_pointer_it(HEAP->old_pointer_space(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001311 &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001312 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
1313 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001314
Steve Block44f0eee2011-05-26 01:26:41 +01001315 HeapObjectIterator old_data_it(HEAP->old_data_space(), &OverflowObjectSize);
1316 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
1317 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001318
Steve Block44f0eee2011-05-26 01:26:41 +01001319 HeapObjectIterator code_it(HEAP->code_space(), &OverflowObjectSize);
1320 OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
1321 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001322
Steve Block44f0eee2011-05-26 01:26:41 +01001323 HeapObjectIterator map_it(HEAP->map_space(), &OverflowObjectSize);
1324 OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
1325 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001326
Steve Block44f0eee2011-05-26 01:26:41 +01001327 HeapObjectIterator cell_it(HEAP->cell_space(), &OverflowObjectSize);
1328 OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
1329 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001330
Steve Block44f0eee2011-05-26 01:26:41 +01001331 LargeObjectIterator lo_it(HEAP->lo_space(), &OverflowObjectSize);
1332 OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
1333 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001334
Steve Block44f0eee2011-05-26 01:26:41 +01001335 marking_stack_.clear_overflowed();
Steve Blocka7e24c12009-10-30 11:49:00 +00001336}
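
// Sketch of the overall overflow protocol implemented by
// RefillMarkingStack() and EmptyMarkingStack(), as used by the callers below:
//
//   while (marking_stack_.overflowed()) {
//     RefillMarkingStack();  // linear scan for overflow bits, may fill stack
//     EmptyMarkingStack();   // may overflow the stack again
//   }
//
// Informally, this terminates because the set of marked objects only grows.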
1337
1338
1339// Mark all objects reachable (transitively) from objects on the marking
1340// stack. Before: the marking stack contains zero or more heap object
1341// pointers. After: the marking stack is empty and there are no overflowed
1342// objects in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001343void MarkCompactCollector::ProcessMarkingStack() {
1344 EmptyMarkingStack();
Steve Block44f0eee2011-05-26 01:26:41 +01001345 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001346 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001347 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001348 }
1349}
1350
1351
Steve Block44f0eee2011-05-26 01:26:41 +01001352void MarkCompactCollector::ProcessExternalMarking() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001353 bool work_to_do = true;
Steve Block44f0eee2011-05-26 01:26:41 +01001354 ASSERT(marking_stack_.is_empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00001355 while (work_to_do) {
1356 MarkObjectGroups();
Steve Block44f0eee2011-05-26 01:26:41 +01001357 MarkImplicitRefGroups();
1358 work_to_do = !marking_stack_.is_empty();
Iain Merrick75681382010-08-19 15:07:18 +01001359 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001360 }
1361}
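
// Note: the loop above runs to a fixed point. Marking the members of one
// group can make another group eligible (its first member becomes marked),
// so groups are rescanned until a whole pass pushes nothing new onto the
// marking stack.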
1362
1363
1364void MarkCompactCollector::MarkLiveObjects() {
Leon Clarkef7060e22010-06-03 12:02:55 +01001365 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001366 // The recursive GC marker detects when it is nearing stack overflow,
1367 // and switches to a different marking system. JS interrupts interfere
1368 // with the C stack limit check.
Steve Block44f0eee2011-05-26 01:26:41 +01001369 PostponeInterruptsScope postpone(heap_->isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001370
Steve Blocka7e24c12009-10-30 11:49:00 +00001371#ifdef DEBUG
1372 ASSERT(state_ == PREPARE_GC);
1373 state_ = MARK_LIVE_OBJECTS;
1374#endif
1375 // The to space contains live objects, the from space is used as a marking
1376 // stack.
Steve Block44f0eee2011-05-26 01:26:41 +01001377 marking_stack_.Initialize(heap_->new_space()->FromSpaceLow(),
1378 heap_->new_space()->FromSpaceHigh());
Steve Blocka7e24c12009-10-30 11:49:00 +00001379
Steve Block44f0eee2011-05-26 01:26:41 +01001380 ASSERT(!marking_stack_.overflowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00001381
Iain Merrick75681382010-08-19 15:07:18 +01001382 PrepareForCodeFlushing();
1383
Steve Block44f0eee2011-05-26 01:26:41 +01001384 RootMarkingVisitor root_visitor(heap_);
Steve Blocka7e24c12009-10-30 11:49:00 +00001385 MarkRoots(&root_visitor);
1386
1387 // The objects reachable from the roots are marked, yet unreachable
Steve Block44f0eee2011-05-26 01:26:41 +01001388 // objects are unmarked. Mark objects reachable due to host
1389 // application specific logic.
1390 ProcessExternalMarking();
Steve Blocka7e24c12009-10-30 11:49:00 +00001391
1392 // The objects reachable from the roots or object groups are marked,
1393 // yet unreachable objects are unmarked. Mark objects reachable
1394 // only from weak global handles.
1395 //
1396 // First we identify nonlive weak handles and mark them as pending
1397 // destruction.
Steve Block44f0eee2011-05-26 01:26:41 +01001398 heap_->isolate()->global_handles()->IdentifyWeakHandles(
1399 &IsUnmarkedHeapObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00001400 // Then we mark the objects and process the transitive closure.
Steve Block44f0eee2011-05-26 01:26:41 +01001401 heap_->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
1402 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001403 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001404 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001405 }
1406
Steve Block44f0eee2011-05-26 01:26:41 +01001407 // Repeat host application specific marking to mark unmarked objects
1408 // reachable from the weak roots.
1409 ProcessExternalMarking();
Steve Blocka7e24c12009-10-30 11:49:00 +00001410
1411 // Prune the symbol table removing all symbols only pointed to by the
1412 // symbol table. Cannot use symbol_table() here because the symbol
1413 // table is marked.
Steve Block44f0eee2011-05-26 01:26:41 +01001414 SymbolTable* symbol_table = heap_->raw_unchecked_symbol_table();
Steve Blocka7e24c12009-10-30 11:49:00 +00001415 SymbolTableCleaner v;
1416 symbol_table->IterateElements(&v);
1417 symbol_table->ElementsRemoved(v.PointersRemoved());
Steve Block44f0eee2011-05-26 01:26:41 +01001418 heap_->external_string_table_.Iterate(&v);
1419 heap_->external_string_table_.CleanUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001420
Ben Murdochf87a2032010-10-22 12:50:53 +01001421 // Process the weak references.
1422 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
Steve Block44f0eee2011-05-26 01:26:41 +01001423 heap_->ProcessWeakReferences(&mark_compact_object_retainer);
Ben Murdochf87a2032010-10-22 12:50:53 +01001424
Steve Blocka7e24c12009-10-30 11:49:00 +00001425 // Remove object groups after marking phase.
Steve Block44f0eee2011-05-26 01:26:41 +01001426 heap_->isolate()->global_handles()->RemoveObjectGroups();
1427 heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb0fe1622011-05-05 13:52:32 +01001428
1429 // Flush code from collected candidates.
Steve Block44f0eee2011-05-26 01:26:41 +01001430 if (is_code_flushing_enabled()) {
1431 code_flusher_->ProcessCandidates();
1432 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001433
1434 // Clean up dead objects from the runtime profiler.
Steve Block44f0eee2011-05-26 01:26:41 +01001435 heap_->isolate()->runtime_profiler()->RemoveDeadSamples();
Steve Blocka7e24c12009-10-30 11:49:00 +00001436}
1437
1438
Steve Blocka7e24c12009-10-30 11:49:00 +00001439#ifdef DEBUG
1440void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1441 live_bytes_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001442 if (HEAP->new_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001443 live_young_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001444 } else if (HEAP->map_space()->Contains(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001445 ASSERT(obj->IsMap());
Steve Block6ded16b2010-05-10 14:33:55 +01001446 live_map_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001447 } else if (HEAP->cell_space()->Contains(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001448 ASSERT(obj->IsJSGlobalPropertyCell());
Steve Block6ded16b2010-05-10 14:33:55 +01001449 live_cell_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001450 } else if (HEAP->old_pointer_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001451 live_old_pointer_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001452 } else if (HEAP->old_data_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001453 live_old_data_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001454 } else if (HEAP->code_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001455 live_code_objects_size_ += obj->Size();
Steve Block44f0eee2011-05-26 01:26:41 +01001456 } else if (HEAP->lo_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001457 live_lo_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001458 } else {
1459 UNREACHABLE();
1460 }
1461}
1462#endif // DEBUG
1463
1464
1465void MarkCompactCollector::SweepLargeObjectSpace() {
1466#ifdef DEBUG
1467 ASSERT(state_ == MARK_LIVE_OBJECTS);
1468 state_ =
1469 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1470#endif
1471 // Deallocate unmarked objects and clear marked bits for marked objects.
Steve Block44f0eee2011-05-26 01:26:41 +01001472 HEAP->lo_space()->FreeUnmarkedObjects();
Steve Blocka7e24c12009-10-30 11:49:00 +00001473}
1474
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001475
Steve Blocka7e24c12009-10-30 11:49:00 +00001476// Safe to use during marking phase only.
1477bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1478 MapWord metamap = object->map_word();
1479 metamap.ClearMark();
1480 return metamap.ToMap()->instance_type() == MAP_TYPE;
1481}
1482
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001483
Steve Blocka7e24c12009-10-30 11:49:00 +00001484void MarkCompactCollector::ClearNonLiveTransitions() {
Steve Block44f0eee2011-05-26 01:26:41 +01001485 HeapObjectIterator map_iterator(HEAP->map_space(), &SizeOfMarkedObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00001486 // Iterate over the map space, setting map transitions that go from
1487 // a marked map to an unmarked map to null transitions. At the same time,
1488 // set all the prototype fields of maps back to their original value,
1489 // dropping the back pointers temporarily stored in the prototype field.
1490 // Setting the prototype field requires following the linked list of
1491 // back pointers, reversing them all at once. This allows us to find
1492 // those maps with map transitions that need to be nulled, and only
1493 // scan the descriptor arrays of those maps, not all maps.
Leon Clarkee46be812010-01-19 14:06:41 +00001494 // All of these actions are carried out only on maps of JSObjects
Steve Blocka7e24c12009-10-30 11:49:00 +00001495 // and related subtypes.
Leon Clarked91b9f72010-01-27 17:25:45 +00001496 for (HeapObject* obj = map_iterator.next();
1497 obj != NULL; obj = map_iterator.next()) {
1498 Map* map = reinterpret_cast<Map*>(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001499 if (!map->IsMarked() && map->IsByteArray()) continue;
1500
1501 ASSERT(SafeIsMap(map));
1502 // Only JSObject and subtypes have map transitions and back pointers.
1503 if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue;
1504 if (map->instance_type() > JS_FUNCTION_TYPE) continue;
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001505
1506 if (map->IsMarked() && map->attached_to_shared_function_info()) {
1507 // This map is used for inobject slack tracking and has been detached
1508 // from SharedFunctionInfo during the mark phase.
1509 // Since it survived the GC, reattach it now.
1510 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1511 }
1512
Steve Blocka7e24c12009-10-30 11:49:00 +00001513 // Follow the chain of back pointers to find the prototype.
1514 Map* current = map;
1515 while (SafeIsMap(current)) {
1516 current = reinterpret_cast<Map*>(current->prototype());
1517 ASSERT(current->IsHeapObject());
1518 }
1519 Object* real_prototype = current;
1520
1521 // Follow back pointers, setting them to prototype,
1522 // clearing map transitions when necessary.
1523 current = map;
1524 bool on_dead_path = !current->IsMarked();
1525 Object* next;
1526 while (SafeIsMap(current)) {
1527 next = current->prototype();
1528 // There should never be a dead map above a live map.
1529 ASSERT(on_dead_path || current->IsMarked());
1530
1531 // A live map above a dead map indicates a dead transition.
1532 // This test will always be false on the first iteration.
1533 if (on_dead_path && current->IsMarked()) {
1534 on_dead_path = false;
Steve Block44f0eee2011-05-26 01:26:41 +01001535 current->ClearNonLiveTransitions(heap_, real_prototype);
Steve Blocka7e24c12009-10-30 11:49:00 +00001536 }
1537 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1538 real_prototype;
1539 current = reinterpret_cast<Map*>(next);
1540 }
1541 }
1542}
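
// Illustrative sketch of the back pointer walk above (maps hypothetical):
//
//   M2 (dead) -> M1 (dead) -> M0 (live) -> P (real prototype)
//
// where "->" is the back pointer temporarily stored in the prototype field.
// The first loop follows the chain to find P. The second loop walks it
// again: the dead-to-live transition at M0 is detected via on_dead_path and
// triggers ClearNonLiveTransitions(heap_, P) on M0, and every map on the
// chain gets its prototype field restored to P.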
1543
1544// -------------------------------------------------------------------------
1545// Phase 2: Encode forwarding addresses.
1546// When compacting, forwarding addresses for objects in old space and map
1547// space are encoded in their map pointer word (along with an encoding of
1548// their map pointers).
1549//
Leon Clarkee46be812010-01-19 14:06:41 +00001550// The exact encoding is described in the comments for class MapWord in
1551// objects.h.
Steve Blocka7e24c12009-10-30 11:49:00 +00001552//
1553// An address range [start, end) can have both live and non-live objects.
1554// Maximal non-live regions are marked so they can be skipped on subsequent
1555// sweeps of the heap. A distinguished map-pointer encoding is used to mark
1556// free regions of one-word size (in which case the next word is the start
1557// of a live object). A second distinguished map-pointer encoding is used
1558// to mark free regions larger than one word, and the size of the free
1559// region (including the first word) is written to the second word of the
1560// region.
1561//
1562// Any valid map page offset must lie in the object area of the page, so map
1563// page offsets less than Page::kObjectStartOffset are invalid. We use a
1564// pair of distinguished invalid map encodings (for single word and multiple
1565// words) to indicate free regions in the page found during computation of
1566// forwarding addresses and skipped over in subsequent sweeps.
Steve Blocka7e24c12009-10-30 11:49:00 +00001567
1568
1569// Encode a free region, defined by the given start address and size, in the
1570// first word or two of the region.
1571void EncodeFreeRegion(Address free_start, int free_size) {
1572 ASSERT(free_size >= kIntSize);
1573 if (free_size == kIntSize) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001574 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001575 } else {
1576 ASSERT(free_size >= 2 * kIntSize);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001577 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 Memory::int_at(free_start + kIntSize) = free_size;
1579 }
1580
1581#ifdef DEBUG
1582 // Zap the body of the free region.
1583 if (FLAG_enable_slow_asserts) {
1584 for (int offset = 2 * kIntSize;
1585 offset < free_size;
1586 offset += kPointerSize) {
1587 Memory::Address_at(free_start + offset) = kZapValue;
1588 }
1589 }
1590#endif
1591}
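
// Illustrative layout (sizes hypothetical, 32-bit words): a 12-byte free
// region is encoded as
//
//   [kMultiFreeEncoding][12][zapped words...]
//
// while a single free word is just [kSingleFreeEncoding]. Per the comments
// above, both encodings are offsets below Page::kObjectStartOffset, so no
// valid map pointer can be confused with them and later sweeps can skip the
// region without consulting a map.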
1592
1593
1594// Try to promote all objects in new space: heap numbers and sequential
1595// strings are promoted to the old data space, all other objects to the old
1596// pointer space. Objects too large for a paged space stay in new space.
Steve Block44f0eee2011-05-26 01:26:41 +01001597inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1598 HeapObject* object,
John Reck59135872010-11-02 12:39:01 -07001599 int object_size) {
1600 MaybeObject* forwarded;
Steve Block44f0eee2011-05-26 01:26:41 +01001601 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001602 forwarded = Failure::Exception();
1603 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001604 OldSpace* target_space = heap->TargetSpace(object);
1605 ASSERT(target_space == heap->old_pointer_space() ||
1606 target_space == heap->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00001607 forwarded = target_space->MCAllocateRaw(object_size);
1608 }
John Reck59135872010-11-02 12:39:01 -07001609 Object* result;
1610 if (!forwarded->ToObject(&result)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001611 result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
Steve Blocka7e24c12009-10-30 11:49:00 +00001612 }
John Reck59135872010-11-02 12:39:01 -07001613 return result;
Steve Blocka7e24c12009-10-30 11:49:00 +00001614}
1615
1616
1617// Allocation functions for the paged spaces call the space's MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001618MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001619 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001620 HeapObject* ignore,
1621 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001622 return heap->old_pointer_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001623}
1624
1625
John Reck59135872010-11-02 12:39:01 -07001626MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001627 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001628 HeapObject* ignore,
1629 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001630 return heap->old_data_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001631}
1632
1633
John Reck59135872010-11-02 12:39:01 -07001634MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001635 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001636 HeapObject* ignore,
1637 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001638 return heap->code_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001639}
1640
1641
John Reck59135872010-11-02 12:39:01 -07001642MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001643 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001644 HeapObject* ignore,
1645 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001646 return heap->map_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001647}
1648
1649
Steve Block44f0eee2011-05-26 01:26:41 +01001650MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1651 Heap* heap, HeapObject* ignore, int object_size) {
1652 return heap->cell_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001653}
1654
1655
1656// The forwarding address is encoded at the same offset as the current
1657// to-space object, but in from space.
Steve Block44f0eee2011-05-26 01:26:41 +01001658inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1659 HeapObject* old_object,
Steve Blocka7e24c12009-10-30 11:49:00 +00001660 int object_size,
1661 Object* new_object,
1662 int* ignored) {
1663 int offset =
Steve Block44f0eee2011-05-26 01:26:41 +01001664 heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1665 Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
Steve Blocka7e24c12009-10-30 11:49:00 +00001666 HeapObject::cast(new_object)->address();
1667}
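
// Sketch of the mirroring above (offset hypothetical): if a live object sits
// at to-space offset 0x40, its new address is recorded at
// from_space_low + 0x40. The object itself is untouched, so new space needs
// no map word encoding.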
1668
1669
1670// The forwarding address is encoded in the map pointer of the object as an
1671// offset (in terms of live bytes) from the address of the first live object
1672// in the page.
Steve Block44f0eee2011-05-26 01:26:41 +01001673inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1674 HeapObject* old_object,
Steve Blocka7e24c12009-10-30 11:49:00 +00001675 int object_size,
1676 Object* new_object,
1677 int* offset) {
1678 // Record the forwarding address of the first live object if necessary.
1679 if (*offset == 0) {
1680 Page::FromAddress(old_object->address())->mc_first_forwarded =
1681 HeapObject::cast(new_object)->address();
1682 }
1683
1684 MapWord encoding =
1685 MapWord::EncodeAddress(old_object->map()->address(), *offset);
1686 old_object->set_map_word(encoding);
1687 *offset += object_size;
1688 ASSERT(*offset <= Page::kObjectAreaSize);
1689}
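
// Worked example (sizes hypothetical): if the first live object on a page is
// 96 bytes, the second live object is encoded with *offset == 96, i.e. its
// map word becomes EncodeAddress(map_address, 96). Since live objects are
// relocated contiguously, the new address can later be recovered as the
// page's mc_first_forwarded plus 96.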
1690
1691
1692// Most non-live objects are ignored.
1693inline void IgnoreNonLiveObject(HeapObject* object) {}
1694
1695
Steve Blocka7e24c12009-10-30 11:49:00 +00001696// Function template that, given a range of addresses (e.g., a semispace or a
1697// paged space page), iterates through the objects in the range to clear
1698// mark bits and compute and encode forwarding addresses. As a side effect,
1699// maximal free chunks are marked so that they can be skipped on subsequent
1700// sweeps.
1701//
1702// The template parameters are an allocation function, a forwarding address
1703// encoding function, and a function to process non-live objects.
1704template<MarkCompactCollector::AllocationFunction Alloc,
1705 MarkCompactCollector::EncodingFunction Encode,
1706 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
Steve Block44f0eee2011-05-26 01:26:41 +01001707inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
1708 Address start,
Steve Blocka7e24c12009-10-30 11:49:00 +00001709 Address end,
1710 int* offset) {
1711 // The start address of the current free region while sweeping the space.
1712 // This address is set when a transition from live to non-live objects is
1713 // encountered. A value (an encoding of the 'next free region' pointer)
1714 // is written to memory at this address when a transition from non-live to
1715 // live objects is encountered.
1716 Address free_start = NULL;
1717
1718 // A flag giving the state of the previously swept object. Initially true
1719 // to ensure that free_start is initialized to a proper address before
1720 // trying to write to it.
1721 bool is_prev_alive = true;
1722
1723 int object_size; // Will be set on each iteration of the loop.
1724 for (Address current = start; current < end; current += object_size) {
1725 HeapObject* object = HeapObject::FromAddress(current);
1726 if (object->IsMarked()) {
1727 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01001728 collector->tracer()->decrement_marked_count();
Steve Blocka7e24c12009-10-30 11:49:00 +00001729 object_size = object->Size();
1730
Steve Block44f0eee2011-05-26 01:26:41 +01001731 Object* forwarded =
1732 Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
1733 Encode(collector->heap(), object, object_size, forwarded, offset);
Steve Blocka7e24c12009-10-30 11:49:00 +00001734
1735#ifdef DEBUG
1736 if (FLAG_gc_verbose) {
1737 PrintF("forward %p -> %p.\n", object->address(),
1738 HeapObject::cast(forwarded)->address());
1739 }
1740#endif
1741 if (!is_prev_alive) { // Transition from non-live to live.
Steve Blockd0582a62009-12-15 09:54:21 +00001742 EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
Steve Blocka7e24c12009-10-30 11:49:00 +00001743 is_prev_alive = true;
1744 }
1745 } else { // Non-live object.
1746 object_size = object->Size();
1747 ProcessNonLive(object);
1748 if (is_prev_alive) { // Transition from live to non-live.
1749 free_start = current;
1750 is_prev_alive = false;
1751 }
Steve Block1e0659c2011-05-24 12:43:12 +01001752 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001753 }
1754 }
1755
1756 // If we ended on a free region, mark it.
Steve Blockd0582a62009-12-15 09:54:21 +00001757 if (!is_prev_alive) {
1758 EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
1759 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001760}
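
// Example pass (sizes hypothetical) over a range laid out as
//
//   [live 24][dead 8][dead 16][live 32]
//
// The two dead objects form one free region: free_start is latched at the
// first dead word, and EncodeFreeRegion(free_start, 24) is emitted when the
// next live object is reached (or at the end of the range).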
1761
1762
1763// Functions to encode the forwarding pointers in each compactable space.
1764void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
1765 int ignored;
1766 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
1767 EncodeForwardingAddressInNewSpace,
1768 IgnoreNonLiveObject>(
Steve Block44f0eee2011-05-26 01:26:41 +01001769 this,
1770 heap_->new_space()->bottom(),
1771 heap_->new_space()->top(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001772 &ignored);
1773}
1774
1775
1776template<MarkCompactCollector::AllocationFunction Alloc,
1777 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
1778void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
1779 PagedSpace* space) {
1780 PageIterator it(space, PageIterator::PAGES_IN_USE);
1781 while (it.has_next()) {
1782 Page* p = it.next();
Steve Block6ded16b2010-05-10 14:33:55 +01001783
Steve Blocka7e24c12009-10-30 11:49:00 +00001784 // The offset of each live object in the page from the first live object
1785 // in the page.
1786 int offset = 0;
1787 EncodeForwardingAddressesInRange<Alloc,
1788 EncodeForwardingAddressInPagedSpace,
1789 ProcessNonLive>(
Steve Block44f0eee2011-05-26 01:26:41 +01001790 this,
Steve Blocka7e24c12009-10-30 11:49:00 +00001791 p->ObjectAreaStart(),
1792 p->AllocationTop(),
1793 &offset);
1794 }
1795}
1796
1797
Steve Block6ded16b2010-05-10 14:33:55 +01001798// We scavenge new space simultaneously with sweeping. This is done in two
1799// passes.
1800// The first pass migrates all live objects from one semispace to another or
1801// promotes them to old space. The forwarding address is written directly
1802// into the first word of the object without any encoding. If the object is
1803// dead, NULL is written as the forwarding address.
1804// The second pass updates pointers to new space in all spaces. It is
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001805// possible to encounter pointers to dead objects during traversal of dirty
1806// regions; we should clear them to avoid visiting them again during the
1807// next dirty regions iteration.
Steve Block44f0eee2011-05-26 01:26:41 +01001808static void MigrateObject(Heap* heap,
1809 Address dst,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001810 Address src,
1811 int size,
1812 bool to_old_space) {
1813 if (to_old_space) {
Steve Block44f0eee2011-05-26 01:26:41 +01001814 heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001815 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001816 heap->CopyBlock(dst, src, size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001817 }
Steve Block6ded16b2010-05-10 14:33:55 +01001818
1819 Memory::Address_at(src) = dst;
1820}
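
// Note: after MigrateObject() the first word of the from-space copy holds
// the raw destination address (Memory::Address_at(src) == dst), which is
// exactly what the second sweeping pass reads in UpdatePointerToNewGen()
// below.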
1821
1822
Iain Merrick75681382010-08-19 15:07:18 +01001823class StaticPointersToNewGenUpdatingVisitor : public
1824 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1825 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001826 static inline void VisitPointer(Heap* heap, Object** p) {
Iain Merrick75681382010-08-19 15:07:18 +01001827 if (!(*p)->IsHeapObject()) return;
1828
1829 HeapObject* obj = HeapObject::cast(*p);
1830 Address old_addr = obj->address();
1831
Steve Block44f0eee2011-05-26 01:26:41 +01001832 if (heap->new_space()->Contains(obj)) {
1833 ASSERT(heap->InFromSpace(*p));
Iain Merrick75681382010-08-19 15:07:18 +01001834 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
1835 }
1836 }
1837};
1838
1839
Steve Block6ded16b2010-05-10 14:33:55 +01001840// Visitor for updating pointers from live objects in old spaces to new space.
1841// It does not expect to encounter pointers to dead objects.
1842class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
1843 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001844 explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
1845
Steve Block6ded16b2010-05-10 14:33:55 +01001846 void VisitPointer(Object** p) {
Steve Block44f0eee2011-05-26 01:26:41 +01001847 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
Steve Block6ded16b2010-05-10 14:33:55 +01001848 }
1849
1850 void VisitPointers(Object** start, Object** end) {
Iain Merrick75681382010-08-19 15:07:18 +01001851 for (Object** p = start; p < end; p++) {
Steve Block44f0eee2011-05-26 01:26:41 +01001852 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
Iain Merrick75681382010-08-19 15:07:18 +01001853 }
Steve Block6ded16b2010-05-10 14:33:55 +01001854 }
1855
1856 void VisitCodeTarget(RelocInfo* rinfo) {
1857 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
1858 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1859 VisitPointer(&target);
1860 rinfo->set_target_address(Code::cast(target)->instruction_start());
1861 }
1862
1863 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001864 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
1865 rinfo->IsPatchedReturnSequence()) ||
1866 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1867 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block6ded16b2010-05-10 14:33:55 +01001868 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1869 VisitPointer(&target);
1870 rinfo->set_call_address(Code::cast(target)->instruction_start());
1871 }
Steve Block44f0eee2011-05-26 01:26:41 +01001872 private:
1873 Heap* heap_;
Steve Block6ded16b2010-05-10 14:33:55 +01001874};
1875
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001876
Steve Block6ded16b2010-05-10 14:33:55 +01001877// Helper for updating pointers from live objects in old spaces to new space.
1878// It can encounter pointers to dead objects in new space when traversing map
1879// space (see the comment for MigrateObject above).
1880static void UpdatePointerToNewGen(HeapObject** p) {
1881 if (!(*p)->IsHeapObject()) return;
1882
1883 Address old_addr = (*p)->address();
Steve Block44f0eee2011-05-26 01:26:41 +01001884 ASSERT(HEAP->InFromSpace(*p));
Steve Block6ded16b2010-05-10 14:33:55 +01001885
1886 Address new_addr = Memory::Address_at(old_addr);
1887
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001888 if (new_addr == NULL) {
1889 // We encountered pointer to a dead object. Clear it so we will
1890 // not visit it again during next iteration of dirty regions.
1891 *p = NULL;
1892 } else {
1893 *p = HeapObject::FromAddress(new_addr);
1894 }
Steve Block6ded16b2010-05-10 14:33:55 +01001895}
1896
1897
Steve Block44f0eee2011-05-26 01:26:41 +01001898static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
1899 Object** p) {
Steve Block6ded16b2010-05-10 14:33:55 +01001900 Address old_addr = HeapObject::cast(*p)->address();
1901 Address new_addr = Memory::Address_at(old_addr);
1902 return String::cast(HeapObject::FromAddress(new_addr));
1903}
1904
1905
Steve Block44f0eee2011-05-26 01:26:41 +01001906static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Steve Block6ded16b2010-05-10 14:33:55 +01001907 Object* result;
1908
Steve Block44f0eee2011-05-26 01:26:41 +01001909 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07001910 MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01001911 heap->lo_space()->AllocateRawFixedArray(object_size);
John Reck59135872010-11-02 12:39:01 -07001912 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001913 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001914 MigrateObject(heap, target->address(), object->address(), object_size,
1915 true);
1916 heap->mark_compact_collector()->tracer()->
Leon Clarkef7060e22010-06-03 12:02:55 +01001917 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001918 return true;
1919 }
1920 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001921 OldSpace* target_space = heap->TargetSpace(object);
Steve Block6ded16b2010-05-10 14:33:55 +01001922
Steve Block44f0eee2011-05-26 01:26:41 +01001923 ASSERT(target_space == heap->old_pointer_space() ||
1924 target_space == heap->old_data_space());
John Reck59135872010-11-02 12:39:01 -07001925 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
1926 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001927 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01001928 MigrateObject(heap,
1929 target->address(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001930 object->address(),
1931 object_size,
Steve Block44f0eee2011-05-26 01:26:41 +01001932 target_space == heap->old_pointer_space());
1933 heap->mark_compact_collector()->tracer()->
Leon Clarkef7060e22010-06-03 12:02:55 +01001934 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01001935 return true;
1936 }
1937 }
1938
1939 return false;
1940}
1941
1942
Steve Block44f0eee2011-05-26 01:26:41 +01001943static void SweepNewSpace(Heap* heap, NewSpace* space) {
1944 heap->CheckNewSpaceExpansionCriteria();
Steve Block6ded16b2010-05-10 14:33:55 +01001945
1946 Address from_bottom = space->bottom();
1947 Address from_top = space->top();
1948
1949 // Flip the semispaces. After flipping, to space is empty, from space has
1950 // live objects.
1951 space->Flip();
1952 space->ResetAllocationInfo();
1953
1954 int size = 0;
1955 int survivors_size = 0;
1956
1957 // First pass: traverse all objects in inactive semispace, remove marks,
1958 // migrate live objects and write forwarding addresses.
1959 for (Address current = from_bottom; current < from_top; current += size) {
1960 HeapObject* object = HeapObject::FromAddress(current);
1961
1962 if (object->IsMarked()) {
1963 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01001964 heap->mark_compact_collector()->tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01001965
1966 size = object->Size();
1967 survivors_size += size;
1968
1969 // Aggressively promote young survivors to the old space.
Steve Block44f0eee2011-05-26 01:26:41 +01001970 if (TryPromoteObject(heap, object, size)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001971 continue;
1972 }
1973
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001974 // Promotion failed. Just migrate object to another semispace.
Steve Block6ded16b2010-05-10 14:33:55 +01001975 // Allocation cannot fail at this point: semispaces are of equal size.
John Reck59135872010-11-02 12:39:01 -07001976 Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
Steve Block6ded16b2010-05-10 14:33:55 +01001977
Steve Block44f0eee2011-05-26 01:26:41 +01001978 MigrateObject(heap,
1979 HeapObject::cast(target)->address(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001980 current,
1981 size,
1982 false);
Steve Block6ded16b2010-05-10 14:33:55 +01001983 } else {
Steve Block1e0659c2011-05-24 12:43:12 +01001984 // Process the dead object before we write a NULL into its header.
1985 LiveObjectList::ProcessNonLive(object);
1986
Steve Block6ded16b2010-05-10 14:33:55 +01001987 size = object->Size();
1988 Memory::Address_at(current) = NULL;
1989 }
1990 }
1991
1992 // Second pass: find pointers to new space and update them.
Steve Block44f0eee2011-05-26 01:26:41 +01001993 PointersToNewGenUpdatingVisitor updating_visitor(heap);
Steve Block6ded16b2010-05-10 14:33:55 +01001994
1995 // Update pointers in to space.
Iain Merrick75681382010-08-19 15:07:18 +01001996 Address current = space->bottom();
1997 while (current < space->top()) {
1998 HeapObject* object = HeapObject::FromAddress(current);
1999 current +=
2000 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
2001 object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002002 }
Steve Block6ded16b2010-05-10 14:33:55 +01002003
2004 // Update roots.
Steve Block44f0eee2011-05-26 01:26:41 +01002005 heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
Steve Block1e0659c2011-05-24 12:43:12 +01002006 LiveObjectList::IterateElements(&updating_visitor);
Steve Block6ded16b2010-05-10 14:33:55 +01002007
2008 // Update pointers in old spaces.
Steve Block44f0eee2011-05-26 01:26:41 +01002009 heap->IterateDirtyRegions(heap->old_pointer_space(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002010 &Heap::IteratePointersInDirtyRegion,
2011 &UpdatePointerToNewGen,
Steve Block44f0eee2011-05-26 01:26:41 +01002012 heap->WATERMARK_SHOULD_BE_VALID);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002013
Steve Block44f0eee2011-05-26 01:26:41 +01002014 heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
Steve Block6ded16b2010-05-10 14:33:55 +01002015
2016 // Update pointers from cells.
Steve Block44f0eee2011-05-26 01:26:41 +01002017 HeapObjectIterator cell_iterator(heap->cell_space());
Steve Block6ded16b2010-05-10 14:33:55 +01002018 for (HeapObject* cell = cell_iterator.next();
2019 cell != NULL;
2020 cell = cell_iterator.next()) {
2021 if (cell->IsJSGlobalPropertyCell()) {
2022 Address value_address =
2023 reinterpret_cast<Address>(cell) +
2024 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2025 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2026 }
2027 }
2028
Ben Murdochf87a2032010-10-22 12:50:53 +01002029 // Update pointer from the global contexts list.
Steve Block44f0eee2011-05-26 01:26:41 +01002030 updating_visitor.VisitPointer(heap->global_contexts_list_address());
Ben Murdochf87a2032010-10-22 12:50:53 +01002031
Steve Block6ded16b2010-05-10 14:33:55 +01002032 // Update pointers from external string table.
Steve Block44f0eee2011-05-26 01:26:41 +01002033 heap->UpdateNewSpaceReferencesInExternalStringTable(
Steve Block6ded16b2010-05-10 14:33:55 +01002034 &UpdateNewSpaceReferenceInExternalStringTableEntry);
2035
2036 // All pointers were updated. Update auxiliary allocation info.
Steve Block44f0eee2011-05-26 01:26:41 +01002037 heap->IncrementYoungSurvivorsCounter(survivors_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002038 space->set_age_mark(space->top());
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002039
2040 // Update JSFunction pointers from the runtime profiler.
Steve Block44f0eee2011-05-26 01:26:41 +01002041 heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
Steve Blocka7e24c12009-10-30 11:49:00 +00002042}
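
// Note on the ordering above: all live objects are migrated and given
// forwarding words before any pointer is updated, so every visitor in the
// second pass sees either a valid forwarding address or the NULL written
// into dead objects, which UpdatePointerToNewGen() filters out.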
2043
2044
Steve Block44f0eee2011-05-26 01:26:41 +01002045static void SweepSpace(Heap* heap, PagedSpace* space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002046 PageIterator it(space, PageIterator::PAGES_IN_USE);
Steve Block6ded16b2010-05-10 14:33:55 +01002047
2048 // During sweeping of a paged space we try to find the longest sequences
2049 // of pages without live objects and free them (instead of putting them on
2050 // the free list).
2051
2052 // Page preceding current.
2053 Page* prev = Page::FromAddress(NULL);
2054
2055 // First empty page in a sequence.
2056 Page* first_empty_page = Page::FromAddress(NULL);
2057
2058 // Page preceding first empty page.
2059 Page* prec_first_empty_page = Page::FromAddress(NULL);
2060
2061 // If the last used page of the space ends with a sequence of dead objects,
2062 // we can adjust the allocation top instead of putting this free area into
2063 // the free list. Thus during sweeping we keep track of such areas
2064 // and defer their deallocation until the sweeping of the next page
2065 // is done: if one of the next pages contains live objects we have
2066 // to put such an area into the free list.
2067 Address last_free_start = NULL;
2068 int last_free_size = 0;
2069
Steve Blocka7e24c12009-10-30 11:49:00 +00002070 while (it.has_next()) {
2071 Page* p = it.next();
2072
2073 bool is_previous_alive = true;
2074 Address free_start = NULL;
2075 HeapObject* object;
2076
2077 for (Address current = p->ObjectAreaStart();
2078 current < p->AllocationTop();
2079 current += object->Size()) {
2080 object = HeapObject::FromAddress(current);
2081 if (object->IsMarked()) {
2082 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01002083 heap->mark_compact_collector()->tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01002084
Steve Blocka7e24c12009-10-30 11:49:00 +00002085 if (!is_previous_alive) { // Transition from free to live.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002086 space->DeallocateBlock(free_start,
2087 static_cast<int>(current - free_start),
2088 true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002089 is_previous_alive = true;
2090 }
2091 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002092 heap->mark_compact_collector()->ReportDeleteIfNeeded(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002093 if (is_previous_alive) { // Transition from live to free.
2094 free_start = current;
2095 is_previous_alive = false;
2096 }
Steve Block1e0659c2011-05-24 12:43:12 +01002097 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002098 }
2099 // The object is now unmarked for the call to Size() at the top of the
2100 // loop.
2101 }
2102
Steve Block6ded16b2010-05-10 14:33:55 +01002103 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2104 || (!is_previous_alive && free_start == p->ObjectAreaStart());
2105
2106 if (page_is_empty) {
2107 // This page is empty. Check whether we are in the middle of a
2108 // sequence of empty pages and start one if not.
2109 if (!first_empty_page->is_valid()) {
2110 first_empty_page = p;
2111 prec_first_empty_page = prev;
2112 }
2113
2114 if (!is_previous_alive) {
2115 // There are dead objects on this page. Update space accounting stats
2116 // without putting anything into free list.
2117 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2118 if (size_in_bytes > 0) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002119 space->DeallocateBlock(free_start, size_in_bytes, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002120 }
2121 }
2122 } else {
2123 // This page is not empty. Sequence of empty pages ended on the previous
2124 // one.
2125 if (first_empty_page->is_valid()) {
2126 space->FreePages(prec_first_empty_page, prev);
2127 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2128 }
2129
2130 // If there is a free ending area on one of the previous pages we have
2131 // to deallocate that area and put it on the free list.
2132 if (last_free_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002133 Page::FromAddress(last_free_start)->
2134 SetAllocationWatermark(last_free_start);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002135 space->DeallocateBlock(last_free_start, last_free_size, true);
Steve Block6ded16b2010-05-10 14:33:55 +01002136 last_free_start = NULL;
2137 last_free_size = 0;
2138 }
2139
2140 // If the last region of this page was not live we remember it.
2141 if (!is_previous_alive) {
2142 ASSERT(last_free_size == 0);
2143 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2144 last_free_start = free_start;
Steve Blocka7e24c12009-10-30 11:49:00 +00002145 }
2146 }
Steve Block6ded16b2010-05-10 14:33:55 +01002147
2148 prev = p;
2149 }
2150
2151 // We reached the end of the space. See if we need to adjust the
2152 // allocation top.
2152 Address new_allocation_top = NULL;
2153
2154 if (first_empty_page->is_valid()) {
2155 // The last used pages in the space are empty. We can move the allocation
2156 // top backwards to the beginning of the first empty page.
2157 ASSERT(prev == space->AllocationTopPage());
2158
2159 new_allocation_top = first_empty_page->ObjectAreaStart();
2160 }
2161
2162 if (last_free_size > 0) {
2163 // There was a free ending area on the previous page.
2164 // Deallocate it without putting it into freelist and move allocation
2165 // top to the beginning of this free area.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002166 space->DeallocateBlock(last_free_start, last_free_size, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002167 new_allocation_top = last_free_start;
2168 }
2169
2170 if (new_allocation_top != NULL) {
2171#ifdef DEBUG
2172 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2173 if (!first_empty_page->is_valid()) {
2174 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2175 } else if (last_free_size > 0) {
2176 ASSERT(new_allocation_top_page == prec_first_empty_page);
2177 } else {
2178 ASSERT(new_allocation_top_page == first_empty_page);
2179 }
2180#endif
2181
2182 space->SetTop(new_allocation_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002183 }
2184}
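
// Example (pages hypothetical): if the last used page ends with a run of
// dead objects, the run is deallocated without entering the free list and
// the allocation top is moved back to its start. If instead the trailing
// pages were entirely empty, the top would move back to the first empty
// page's object area start, keeping those pages for reuse.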
2185
2186
Steve Blocka7e24c12009-10-30 11:49:00 +00002187void MarkCompactCollector::EncodeForwardingAddresses() {
2188 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2189 // Objects in the active semispace of the young generation may be
2190 // relocated to the inactive semispace (if not promoted). Set the
2191 // relocation info to the beginning of the inactive semispace.
Steve Block44f0eee2011-05-26 01:26:41 +01002192 heap_->new_space()->MCResetRelocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002193
2194 // Compute the forwarding pointers in each space.
2195 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002196 ReportDeleteIfNeeded>(
Steve Block44f0eee2011-05-26 01:26:41 +01002197 heap_->old_pointer_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002198
2199 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2200 IgnoreNonLiveObject>(
Steve Block44f0eee2011-05-26 01:26:41 +01002201 heap_->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002202
2203 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002204 ReportDeleteIfNeeded>(
Steve Block44f0eee2011-05-26 01:26:41 +01002205 heap_->code_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002206
2207 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2208 IgnoreNonLiveObject>(
Steve Block44f0eee2011-05-26 01:26:41 +01002209 heap_->cell_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002210
2211
2212 // Compute new space next to last after the old and code spaces have been
2213 // compacted. Objects in new space can be promoted to old or code space.
2214 EncodeForwardingAddressesInNewSpace();
2215
2216 // Compute map space last because computing forwarding addresses
2217 // overwrites non-live objects. Objects in the other spaces rely on
2218 // non-live map pointers to get the sizes of non-live objects.
2219 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2220 IgnoreNonLiveObject>(
Steve Block44f0eee2011-05-26 01:26:41 +01002221 heap_->map_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002222
2223 // Write relocation info to the top page, so we can use it later. This is
2224 // done after promoting objects from the new space so we get the correct
2225 // allocation top.
Steve Block44f0eee2011-05-26 01:26:41 +01002226 heap_->old_pointer_space()->MCWriteRelocationInfoToPage();
2227 heap_->old_data_space()->MCWriteRelocationInfoToPage();
2228 heap_->code_space()->MCWriteRelocationInfoToPage();
2229 heap_->map_space()->MCWriteRelocationInfoToPage();
2230 heap_->cell_space()->MCWriteRelocationInfoToPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00002231}
2232
2233
Leon Clarkee46be812010-01-19 14:06:41 +00002234class MapIterator : public HeapObjectIterator {
2235 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002236 MapIterator() : HeapObjectIterator(HEAP->map_space(), &SizeCallback) { }
Leon Clarkee46be812010-01-19 14:06:41 +00002237
2238 explicit MapIterator(Address start)
Steve Block44f0eee2011-05-26 01:26:41 +01002239 : HeapObjectIterator(HEAP->map_space(), start, &SizeCallback) { }
Leon Clarkee46be812010-01-19 14:06:41 +00002240
2241 private:
2242 static int SizeCallback(HeapObject* unused) {
2243 USE(unused);
2244 return Map::kSize;
2245 }
2246};
2247
2248
2249class MapCompact {
2250 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002251 explicit MapCompact(Heap* heap, int live_maps)
2252 : heap_(heap),
2253 live_maps_(live_maps),
2254 to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
Leon Clarkee46be812010-01-19 14:06:41 +00002255 map_to_evacuate_it_(to_evacuate_start_),
2256 first_map_to_evacuate_(
2257 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2258 }
2259
2260 void CompactMaps() {
2261 // As we know the number of maps to evacuate beforehand,
2262 // we stop when there are no more vacant maps.
2263 for (Map* next_vacant_map = NextVacantMap();
2264 next_vacant_map;
2265 next_vacant_map = NextVacantMap()) {
2266 EvacuateMap(next_vacant_map, NextMapToEvacuate());
2267 }
2268
2269#ifdef DEBUG
2270 CheckNoMapsToEvacuate();
2271#endif
2272 }
2273
2274 void UpdateMapPointersInRoots() {
Steve Block44f0eee2011-05-26 01:26:41 +01002275 MapUpdatingVisitor map_updating_visitor;
2276 heap_->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
2277 heap_->isolate()->global_handles()->IterateWeakRoots(&map_updating_visitor);
2278 LiveObjectList::IterateElements(&map_updating_visitor);
Leon Clarkee46be812010-01-19 14:06:41 +00002279 }
2280
Leon Clarkee46be812010-01-19 14:06:41 +00002281 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
Steve Block44f0eee2011-05-26 01:26:41 +01002282 ASSERT(space != heap_->map_space());
Leon Clarkee46be812010-01-19 14:06:41 +00002283
2284 PageIterator it(space, PageIterator::PAGES_IN_USE);
2285 while (it.has_next()) {
2286 Page* p = it.next();
Steve Block44f0eee2011-05-26 01:26:41 +01002287 UpdateMapPointersInRange(heap_, p->ObjectAreaStart(), p->AllocationTop());
Leon Clarkee46be812010-01-19 14:06:41 +00002288 }
2289 }
2290
2291 void UpdateMapPointersInNewSpace() {
Steve Block44f0eee2011-05-26 01:26:41 +01002292 NewSpace* space = heap_->new_space();
2293 UpdateMapPointersInRange(heap_, space->bottom(), space->top());
Leon Clarkee46be812010-01-19 14:06:41 +00002294 }
2295
2296 void UpdateMapPointersInLargeObjectSpace() {
Steve Block44f0eee2011-05-26 01:26:41 +01002297 LargeObjectIterator it(heap_->lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00002298 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Steve Block44f0eee2011-05-26 01:26:41 +01002299 UpdateMapPointersInObject(heap_, obj);
Leon Clarkee46be812010-01-19 14:06:41 +00002300 }
2301
2302 void Finish() {
Steve Block44f0eee2011-05-26 01:26:41 +01002303 heap_->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
Leon Clarkee46be812010-01-19 14:06:41 +00002304 }
2305
2306 private:
Steve Block44f0eee2011-05-26 01:26:41 +01002307 Heap* heap_;
Leon Clarkee46be812010-01-19 14:06:41 +00002308 int live_maps_;
2309 Address to_evacuate_start_;
2310 MapIterator vacant_map_it_;
2311 MapIterator map_to_evacuate_it_;
2312 Map* first_map_to_evacuate_;
2313
2314 // Helper class for updating map pointers in HeapObjects.
2315 class MapUpdatingVisitor: public ObjectVisitor {
2316 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002317 MapUpdatingVisitor() {}
2318
Leon Clarkee46be812010-01-19 14:06:41 +00002319 void VisitPointer(Object** p) {
2320 UpdateMapPointer(p);
2321 }
2322
2323 void VisitPointers(Object** start, Object** end) {
2324 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2325 }
2326
2327 private:
2328 void UpdateMapPointer(Object** p) {
2329 if (!(*p)->IsHeapObject()) return;
2330 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2331
2332 // Moved maps are tagged with an overflowed map word. They are the only
2333 // objects whose map word is overflowed, as marking is already complete.
2334 MapWord map_word = old_map->map_word();
2335 if (!map_word.IsOverflowed()) return;
2336
2337 *p = GetForwardedMap(map_word);
2338 }
2339 };
2340
Leon Clarkee46be812010-01-19 14:06:41 +00002341 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2342 while (true) {
Leon Clarkee46be812010-01-19 14:06:41 +00002343 HeapObject* next = it->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00002344 ASSERT(next != NULL);
Leon Clarkee46be812010-01-19 14:06:41 +00002345 if (next == last)
2346 return NULL;
2347 ASSERT(!next->IsOverflowed());
2348 ASSERT(!next->IsMarked());
2349 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2350 if (next->IsMap() == live)
2351 return reinterpret_cast<Map*>(next);
2352 }
2353 }
2354
2355 Map* NextVacantMap() {
2356 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2357 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2358 return map;
2359 }
2360
2361 Map* NextMapToEvacuate() {
2362 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2363 ASSERT(map != NULL);
2364 ASSERT(map->IsMap());
2365 return map;
2366 }
2367
2368 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2369 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2370 ASSERT(map_to_evacuate->IsMap());
2371
Steve Block6ded16b2010-05-10 14:33:55 +01002372 ASSERT(Map::kSize % 4 == 0);
2373
Steve Block44f0eee2011-05-26 01:26:41 +01002374 map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
2375 vacant_map->address(), map_to_evacuate->address(), Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01002376
Leon Clarkee46be812010-01-19 14:06:41 +00002377 ASSERT(vacant_map->IsMap()); // Due to the block copy above.
2378
2379 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2380 forwarding_map_word.SetOverflow();
2381 map_to_evacuate->set_map_word(forwarding_map_word);
2382
2383 ASSERT(map_to_evacuate->map_word().IsOverflowed());
2384 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2385 }
2386
2387 static Map* GetForwardedMap(MapWord map_word) {
2388 ASSERT(map_word.IsOverflowed());
2389 map_word.ClearOverflow();
2390 Map* new_map = map_word.ToMap();
2391 ASSERT_MAP_ALIGNED(new_map->address());
2392 return new_map;
2393 }

  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(heap->map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(heap->map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n",
               obj->address(),
               reinterpret_cast<void*>(map),
               reinterpret_cast<void*>(new_map));
      }
#endif
    }

    int size = obj->SizeFromMap(map);
    MapUpdatingVisitor map_updating_visitor;
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
    return size;
  }

  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(heap, object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};
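
// SweepSpaces() below drives this class when the map space needs
// compaction: CompactMaps(), then UpdateMapPointersInRoots() and the
// per-space UpdateMapPointers* methods, and finally Finish().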


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(heap_, heap_->old_pointer_space());
  SweepSpace(heap_, heap_->old_data_space());
  SweepSpace(heap_, heap_->code_space());
  SweepSpace(heap_, heap_->cell_space());
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(heap_, heap_->new_space());
  }
  SweepSpace(heap_, heap_->map_space());

  heap_->IterateDirtyRegions(heap_->map_space(),
                             &heap_->IteratePointersInDirtyMapsRegion,
                             &UpdatePointerToNewGen,
                             heap_->WATERMARK_SHOULD_BE_VALID);

  intptr_t live_maps_size = heap_->map_space()->Size();
  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (heap_->map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(heap_, live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == heap_->map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}


// Iterate the live objects in a range of addresses (e.g., a page or a
// semispace).  Free regions in the range have already been encoded in
// place: a single free word holds kSingleFreeEncoding, while a longer
// free region holds kMultiFreeEncoding with its byte size stored in the
// next word.  Each live object is visited with size_func, which both
// processes the object and returns its size; the total live size of the
// range is returned.
int MarkCompactCollector::IterateLiveObjectsInRange(
    Address start,
    Address end,
    LiveObjectCallback size_func) {
  int live_objects_size = 0;
  Address current = start;
  while (current < end) {
    uint32_t encoded_map = Memory::uint32_at(current);
    if (encoded_map == kSingleFreeEncoding) {
      current += kPointerSize;
    } else if (encoded_map == kMultiFreeEncoding) {
      current += Memory::int_at(current + kIntSize);
    } else {
      int size = (this->*size_func)(HeapObject::FromAddress(current));
      current += size;
      live_objects_size += size;
    }
  }
  return live_objects_size;
}
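
// Illustrative walk (hypothetical addresses, assuming 32-bit pointers):
// a live 12-byte object at 0x1000, a single free word at 0x100c encoded
// as kSingleFreeEncoding, and a 24-byte free block at 0x1010 encoded as
// kMultiFreeEncoding with 24 stored at 0x1014.  The loop above visits
// 0x1000, steps to 0x100c, skips 4 bytes, skips 24 more, and resumes at
// the next live object at 0x1028.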


int MarkCompactCollector::IterateLiveObjects(
    NewSpace* space, LiveObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
}


int MarkCompactCollector::IterateLiveObjects(
    PagedSpace* space, LiveObjectCallback size_f) {
  ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
  int total = 0;
  PageIterator it(space, PageIterator::PAGES_IN_USE);
  while (it.has_next()) {
    Page* p = it.next();
    total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
                                       p->AllocationTop(),
                                       size_f);
  }
  return total;
}


// -------------------------------------------------------------------------
// Phase 3: Update pointers

// Helper class for updating pointers in HeapObjects.
class UpdatingVisitor: public ObjectVisitor {
 public:
  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

 private:
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!heap_->InFromSpace(obj));

    if (heap_->new_space()->Contains(obj)) {
      Address forwarding_pointer_addr =
          heap_->new_space()->FromSpaceLow() +
          heap_->new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(heap_->old_pointer_space()->Contains(new_addr) ||
             heap_->old_data_space()->Contains(new_addr) ||
             heap_->new_space()->FromSpaceContains(new_addr) ||
             heap_->lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (heap_->new_space()->FromSpaceContains(new_addr)) {
        ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
               heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (heap_->lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }

  Heap* heap_;
};
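
// Note on the three cases in UpdatePointer above: new-space survivors
// record their forwarding address at the mirrored offset in from space
// (the two semispaces have identical layouts), large objects never move,
// and paged-space objects have their forwarding address recomputed from
// the page-relative encoding by GetForwardingAddressInOldSpace().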


void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor(heap_);
  heap_->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
      &updating_visitor);
  heap_->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  heap_->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);

  // Update the pointer to the head of the weak list of global contexts.
  updating_visitor.VisitPointer(&heap_->global_contexts_list_);

  LiveObjectList::IterateElements(&updating_visitor);

  int live_maps_size = IterateLiveObjects(
      heap_->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap_->old_pointer_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(
      heap_->old_data_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(
      heap_->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(
      heap_->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(
      heap_->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);

  // Large objects do not move, so the map word can be updated directly.
  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    UpdatePointersInNewObject(obj);
  }

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}


int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep old map pointers
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(heap_->map_space()->Contains(old_map));
  ASSERT(heap_->map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap_);
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}


int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point the first word at map_addr is also encoded, so we
  // cannot cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update the map pointer.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap_);
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}
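
// During ENCODE_FORWARDING_ADDRESSES the map word of a live paged-space
// object stops being a plain map pointer: MapWord::EncodeAddress() packs
// the map's address together with the object's forwarding offset within
// its page, and DecodeMapAddress()/DecodeOffset() recover the two parts,
// as the function above relies on.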


Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // The object should be in old space or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if the current object's forwarding pointer is in the same page
  // as the first live object's forwarding pointer.
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page; NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
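
// Worked example with hypothetical numbers: if the object's map word
// decodes to offset 0x500, the page's first forwarding address sits at
// page offset 0x300, and allocation in that page ends at offset 0x700,
// then 0x300 + 0x500 overshoots 0x700, so the forwarding address lands
// in the next page at Page::kObjectStartOffset + (0x500 - (0x700 - 0x300))
// = Page::kObjectStartOffset + 0x100.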


// -------------------------------------------------------------------------
// Phase 4: Relocate objects

void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocate objects; map objects are relocated first because relocating
  // objects in the other spaces relies on map objects to get object sizes.
  int live_maps_size = IterateLiveObjects(
      heap_->map_space(), &MarkCompactCollector::RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap_->old_pointer_space(),
      &MarkCompactCollector::RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(
      heap_->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(
      heap_->code_space(), &MarkCompactCollector::RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(
      heap_->cell_space(), &MarkCompactCollector::RelocateCellObject);
  int live_news_size = IterateLiveObjects(
      heap_->new_space(), &MarkCompactCollector::RelocateNewObject);

  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces.
  heap_->new_space()->Flip();

  heap_->new_space()->MCCommitRelocationInfo();

  // Set age_mark to the bottom of to space.
  Address mark = heap_->new_space()->bottom();
  heap_->new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  heap_->CheckNewSpaceExpansionCriteria();
  heap_->IncrementYoungSurvivorsCounter(live_news_size);
}


int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer.  The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                   old_addr,
                                                   Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  return Map::kSize;
}


static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}


int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
  ASSERT(heap_->map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == heap_->old_data_space()) {
      heap_->MoveBlock(new_addr, old_addr, obj_size);
    } else {
      heap_->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                     old_addr,
                                                     obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap_->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
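
// Note: old data space holds only objects with no pointers to other heap
// objects, so a plain MoveBlock() suffices there; the other paged spaces
// may contain pointers to new space and therefore move contents with
// MoveBlockToOldSpaceAndUpdateRegionMarks() to keep the pages' dirty
// region marks valid.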


int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap_->old_pointer_space());
}


int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap_->old_data_space());
}


int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
  return RelocateOldNonCodeObject(obj, heap_->cell_space());
}


int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap_->map_space());
  ASSERT(heap_->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, heap_->code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap_->MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(heap_->isolate(), CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}


int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get forwarding address.
  Address old_addr = obj->address();
  int offset = heap_->new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(heap_->new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (heap_->new_space()->FromSpaceContains(new_addr)) {
    ASSERT(heap_->new_space()->FromSpaceOffsetForAddress(new_addr) <=
           heap_->new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    ASSERT(heap_->TargetSpace(obj) == heap_->old_pointer_space() ||
           heap_->TargetSpace(obj) == heap_->old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (heap_->InNewSpace(HeapObject::FromAddress(new_addr))) {
    heap_->CopyBlock(new_addr, old_addr, obj_size);
  } else {
    heap_->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                   old_addr,
                                                   obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap_->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap_, ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}

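// The CodeFlusher is created lazily the first time flushing is enabled
// and torn down when disabled; both branches return early if the
// collector is already in the requested state, so repeated calls are
// harmless.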
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(heap_->isolate());
  } else {
    if (code_flusher_ == NULL) return;
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
#ifdef ENABLE_LOGGING_AND_PROFILING
  if (obj->IsCode()) {
    PROFILE(ISOLATE, CodeDeleteEvent(obj->address()));
  }
#endif
}

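// The object is still marked here, so its map word carries the mark bit;
// the bit is cleared on a local copy of the word before the map is used
// to compute the size.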
int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
  MapWord map_word = obj->map_word();
  map_word.ClearMark();
  return obj->SizeFromMap(map_word.ToMap());
}


void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}


} }  // namespace v8::internal