// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "compilation-cache.h"
#include "execution.h"
#include "heap-profiler.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "ic-inl.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      force_compaction_(false),
      compacting_collection_(false),
      compact_on_next_gc_(false),
      previous_marked_count_(0),
      tracer_(NULL),
#ifdef DEBUG
      live_young_objects_size_(0),
      live_old_pointer_objects_size_(0),
      live_old_data_objects_size_(0),
      live_code_objects_size_(0),
      live_map_objects_size_(0),
      live_cell_objects_size_(0),
      live_lo_objects_size_(0),
      live_bytes_(0),
#endif
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_maps_(NULL) { }


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  ClearWeakMaps();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    heap()->MarkMapPointersAsEncoded(true);
    UpdatePointers();
    heap()->MarkMapPointersAsEncoded(false);
    heap()->isolate()->pc_to_code_cache()->Flush();

    RelocateObjects();
  } else {
    SweepSpaces();
    heap()->isolate()->pc_to_code_cache()->Flush();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  if (!heap()->map_space()->MapPointersEncodable())
      compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If GDBJIT interface is active disable compaction.
    compacting_collection_ = false;
  }
#endif

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  heap()->isolate()->stub_cache()->Clear();

  heap()->external_string_table_.CleanUp();

  // If we've just compacted old space there's no reason to check the
  // fragmentation limit. Just return.
  if (HasCompacted()) return;

  // We compact the old generation on the next GC if it has gotten too
  // fragmented (ie, we could recover an expected amount of space by
  // reclaiming the waste and free list blocks).
  static const int kFragmentationLimit = 15;        // Percent.
  static const int kFragmentationAllowed = 1 * MB;  // Absolute.
  intptr_t old_gen_recoverable = 0;
  intptr_t old_gen_used = 0;

  OldSpaces spaces;
  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
    old_gen_recoverable += space->Waste() + space->AvailableFree();
    old_gen_used += space->Size();
  }

  int old_gen_fragmentation =
      static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
  if (old_gen_fragmentation > kFragmentationLimit &&
      old_gen_recoverable > kFragmentationAllowed) {
    compact_on_next_gc_ = true;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

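// CodeFlusher collects JSFunction and SharedFunctionInfo candidates whose
// compiled code may be discarded. Candidates are chained through otherwise
// unused fields (the code entry slot of a JSFunction, a spare slot in the
// code object of a SharedFunctionInfo) and processed after marking: any
// candidate whose code was not marked has its code reset to the lazy
// compile builtin, so it is recompiled on the next call.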
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->unchecked_code() ==
           function->unchecked_shared()->unchecked_code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

 private:
  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->unchecked_shared();

      Code* code = shared->unchecked_code();
      if (!code->IsMarked()) {
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
      } else {
        candidate->set_code(shared->unchecked_code());
      }

      candidate = next_candidate;
    }

    jsfunction_candidates_head_ = NULL;
  }


  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->unchecked_code();
      if (!code->IsMarked()) {
        candidate->set_code(lazy_compile);
      }

      candidate = next_candidate;
    }

    shared_function_info_candidates_head_ = NULL;
  }

  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->unchecked_code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kNextCodeFlushingCandidateOffset);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring.  Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map_word.ToMap()->heap();
  if (second != heap->raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


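// Static visitor used during marking. A dispatch table indexed by visitor id
// selects a specialized body visitor per object type; visiting an object
// marks the objects it references, pushing them on the marking stack (or,
// for large pointer ranges, visiting them recursively while stack space
// allows).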
class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitSlicedString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SlicedString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);

    table_.Register(kVisitGlobalContext,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Context::MarkCompactBodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitJSRegExp,
                    &VisitRegExpAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
  }

  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline caches do not have to be
      // marked since they are contained in HEAP->non_monomorphic_cache().
    } else {
      heap->mark_compact_collector()->MarkObject(code);
    }
  }

  static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
    Object* cell = rinfo->target_cell();
    Object* old_cell = cell;
    VisitPointer(heap, &cell);
    if (cell != old_cell) {
      rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
    }
  }

  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    heap->mark_compact_collector()->MarkObject(code);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    if (!object->IsMarked()) {
      heap->mark_compact_collector()->MarkUnmarkedObject(object);
    }
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    collector->SetMark(obj);
    // Mark the map pointer and the body.
    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    ASSERT(weak_map->next() == Smi::FromInt(0));
    weak_map->set_next(collector->encountered_weak_maps());
    collector->set_encountered_weak_maps(weak_map);

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->heap(),
        object,
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->heap(),
        object,
        JSWeakMap::kTableOffset + kPointerSize,
        object_size);

    // Mark the backing hash table without pushing it on the marking stack.
    ASSERT(!weak_map->unchecked_table()->IsMarked());
    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
    collector->SetMark(weak_map->unchecked_table());
  }

  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
        map->heap());
  }

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  static const int kRegExpCodeThreshold = 5;

  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
    Object* undefined = heap->raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return function->unchecked_code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->unchecked_code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    if (function->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->unchecked_code()) {
      return false;
    }

    return IsFlushable(heap, shared_info);
  }

  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
      return false;
    }

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return false;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) {
      return false;
    }

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) {
      return false;
    }

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) {
      return false;
    }

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return false;
    }

    return true;
  }


  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(heap, function)) return false;

    // This function's code looks flushable. But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
    return true;
  }


  static inline Map* SafeMap(Object* obj) {
    MapWord map_word = HeapObject::cast(obj)->map_word();
    map_word.ClearMark();
    map_word.ClearOverflow();
    return map_word.ToMap();
  }


  static inline bool IsJSBuiltinsObject(Object* obj) {
    return obj->IsHeapObject() &&
        (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
  }


  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    Heap* heap = map->heap();
    if (!(map == heap->raw_unchecked_function_context_map() ||
          map == heap->raw_unchecked_catch_context_map() ||
          map == heap->raw_unchecked_with_context_map() ||
          map == heap->raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }


  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    FixedBodyVisitor<StaticMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }


  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                             code,
                             heap);
      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
                             heap);
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
      }
    }
  }


  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExpFields(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ascii and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExpFields(map, object);
  }


  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                  HeapObject* object) {
    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitSharedFunctionInfoGeneric(map, object);
      return;
    }
    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
  }


  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
      Map* map, HeapObject* object, bool known_flush_code_candidate) {
    Heap* heap = map->heap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    if (!known_flush_code_candidate) {
      known_flush_code_candidate = IsFlushable(heap, shared);
      if (known_flush_code_candidate) {
        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
      }
    }

    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
  }


  static void VisitCodeEntry(Heap* heap, Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(heap, &code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }


  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSFunction(map, object);
      return;
    }

    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    bool flush_code_candidate = false;
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
    }

    if (!flush_code_candidate) {
      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());

      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
        collector->MarkInlinedFunctionsCode(jsfunction->unchecked_code());
      }
    }

    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          flush_code_candidate);
  }


  static void VisitJSFunction(Map* map, HeapObject* object) {
    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          false);
  }


#define SLOT_ADDR(obj, offset) \
  reinterpret_cast<Object**>((obj)->address() + offset)


  static inline void VisitJSFunctionFields(Map* map,
                                           JSFunction* object,
                                           bool flush_code_candidate) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();

    VisitPointers(heap,
                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    if (!flush_code_candidate) {
      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    } else {
      // Don't visit code object.

      // Visit shared function info to avoid double checking of its
      // flushability.
      SharedFunctionInfo* shared_info = object->unchecked_shared();
      if (!shared_info->IsMarked()) {
        Map* shared_info_map = shared_info->map();
        collector->SetMark(shared_info);
        collector->MarkObject(shared_info_map);
        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                   shared_info,
                                                   true);
      }
    }

    VisitPointers(heap,
                  SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));

    // Don't visit the next function list field as it is a weak reference.
  }

  static inline void VisitJSRegExpFields(Map* map,
                                         HeapObject* object) {
    int last_property_offset =
        JSRegExp::kSize + kPointerSize * map->inobject_properties();
    VisitPointers(map->heap(),
                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                  SLOT_ADDR(object, last_property_offset));
  }


  static void VisitSharedFunctionInfoFields(Heap* heap,
                                            HeapObject* object,
                                            bool flush_code_candidate) {
    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));

    if (!flush_code_candidate) {
      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
    }

    VisitPointers(heap,
                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
  }

  #undef SLOT_ADDR

  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


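// ObjectVisitor adapter that forwards to the static marking visitor, for use
// wherever an ObjectVisitor instance is required.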
class MarkingVisitor : public ObjectVisitor {
 public:
  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(heap_, start, end);
  }

 private:
  Heap* heap_;
};


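// Thread visitor that marks the code objects found on archived thread
// stacks, so code still referenced from a stack is never flushed.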
class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);
  }

 private:
  MarkCompactCollector* collector_;
};


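// Visitor that marks every SharedFunctionInfo it encounters together with
// its code, preventing that code from being flushed.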
class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      collector_->MarkObject(shared->unchecked_code());
      collector_->MarkObject(shared);
    }
  }

 private:
  MarkCompactCollector* collector_;
};


void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
  // For optimized functions we should retain both the non-optimized version
  // of its code and the non-optimized version of all inlined functions.
  // This is required to support bailing out from inlined code.
  DeoptimizationInputData* data =
      reinterpret_cast<DeoptimizationInputData*>(
          code->unchecked_deoptimization_data());

  FixedArray* literals = data->UncheckedLiteralArray();

  for (int i = 0, count = data->InlinedFunctionCount()->value();
       i < count;
       i++) {
    JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
    MarkObject(inlined->unchecked_shared()->unchecked_code());
  }
}


void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for the frame that has a pending lazy deoptimization
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function and StackFrame::LookupCode will return the
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkObject(code);
    if (frame->is_optimized()) {
      MarkInlinedFunctionsCode(frame->LookupCode());
    }
  }
}


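// Decides whether code flushing is enabled for this collection and, if so,
// marks all code that must not be flushed: code on the current and archived
// thread stacks, and code reachable from the compilation cache and from
// handle scopes.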
void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap() == Isolate::Current()->heap());

  if (!FLAG_flush_code) {
    EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    EnableCodeFlushing(false);
    return;
  }
#endif
  EnableCodeFlushing(true);

  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  MarkObject(heap()->raw_unchecked_empty_descriptor_array());

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
                               heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingStack();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    if (object->IsMarked()) return;

    Map* map = object->map();
    // Mark the object.
    collector_->SetMark(object);

    // Mark the map pointer and body, and push them on the marking stack.
    collector_->MarkObject(map);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingStack();
  }

  MarkCompactCollector* collector_;
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  explicit SymbolTableCleaner(Heap* heap)
      : heap_(heap), pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map of
        // the object.
        if ((*p)->IsExternalString()) {
          heap_->FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to null_value (as deleted).
        *p = heap_->raw_unchecked_null_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }

 private:
  Heap* heap_;
  int pointers_removed_;
};


// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    MapWord first_word = HeapObject::cast(object)->map_word();
    if (first_word.IsMarked()) {
      return object;
    } else {
      return NULL;
    }
  }
};


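// Marks an unmarked object and pushes it on the marking stack. Maps are
// handled specially so that their code caches can be cleared and, when map
// collection is enabled, their transition links stay weak.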
void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
  ASSERT(!object->IsMarked());
  ASSERT(HEAP->Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    if (FLAG_cleanup_code_caches_at_gc) {
      map->ClearCodeCache(heap());
    }
    SetMark(map);

    // When map collection is enabled we have to mark through map's transitions
    // in a special way to make transition links weak.
    // Only maps for subclasses of JSReceiver can have transitions.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
      MarkMapContents(map);
    } else {
      marking_stack_.Push(map);
    }
  } else {
    SetMark(object);
    marking_stack_.Push(object);
  }
}


void MarkCompactCollector::MarkMapContents(Map* map) {
  // Mark prototype transitions array but don't push it into marking stack.
  // This will make references from it weak. We will clean dead prototype
  // transitions in ClearNonLiveTransitions.
  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);

  Object* raw_descriptor_array =
      *HeapObject::RawField(map,
                            Map::kInstanceDescriptorsOrBitField3Offset);
  if (!raw_descriptor_array->IsSmi()) {
    MarkDescriptorArray(
        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
  }

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}


1238void MarkCompactCollector::MarkDescriptorArray(
1239 DescriptorArray* descriptors) {
1240 if (descriptors->IsMarked()) return;
1241 // Empty descriptor array is marked as a root before any maps are marked.
Steve Block44f0eee2011-05-26 01:26:41 +01001242 ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001243 SetMark(descriptors);
1244
1245 FixedArray* contents = reinterpret_cast<FixedArray*>(
1246 descriptors->get(DescriptorArray::kContentArrayIndex));
1247 ASSERT(contents->IsHeapObject());
1248 ASSERT(!contents->IsMarked());
1249 ASSERT(contents->IsFixedArray());
1250 ASSERT(contents->length() >= 2);
1251 SetMark(contents);
Steve Block44f0eee2011-05-26 01:26:41 +01001252 // Contents contains (value, details) pairs. If the details say that the type
1253 // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
1254 // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
1255 // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
1256 // CONSTANT_TRANSITION is the value an Object* (a Map*).
Steve Blocka7e24c12009-10-30 11:49:00 +00001257 for (int i = 0; i < contents->length(); i += 2) {
1258 // If the pair (value, details) at index i, i+1 is not
1259 // a transition or null descriptor, mark the value.
1260 PropertyDetails details(Smi::cast(contents->get(i + 1)));
1261 if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
1262 HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
1263 if (object->IsHeapObject() && !object->IsMarked()) {
1264 SetMark(object);
Steve Block44f0eee2011-05-26 01:26:41 +01001265 marking_stack_.Push(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00001266 }
1267 }
1268 }
1269 // The DescriptorArray descriptors contains a pointer to its contents array,
1270 // but the contents array is already marked.
Steve Block44f0eee2011-05-26 01:26:41 +01001271 marking_stack_.Push(descriptors);
Steve Blocka7e24c12009-10-30 11:49:00 +00001272}
1273
1274
1275void MarkCompactCollector::CreateBackPointers() {
Ben Murdoch8b112d22011-06-08 16:22:53 +01001276 HeapObjectIterator iterator(heap()->map_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00001277 for (HeapObject* next_object = iterator.next();
1278 next_object != NULL; next_object = iterator.next()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001279 if (next_object->IsMap()) { // Could also be ByteArray on free list.
1280 Map* map = Map::cast(next_object);
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001281 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1282 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001283 map->CreateBackPointers();
1284 } else {
Ben Murdoch8b112d22011-06-08 16:22:53 +01001285 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
Steve Blocka7e24c12009-10-30 11:49:00 +00001286 }
1287 }
1288 }
1289}
1290
1291
1292static int OverflowObjectSize(HeapObject* obj) {
1293 // Recover the normal map pointer, it might be marked as live and
1294 // overflowed.
1295 MapWord map_word = obj->map_word();
1296 map_word.ClearMark();
1297 map_word.ClearOverflow();
1298 return obj->SizeFromMap(map_word.ToMap());
1299}
1300
1301
Steve Block44f0eee2011-05-26 01:26:41 +01001302class OverflowedObjectsScanner : public AllStatic {
1303 public:
1304 // Fill the marking stack with overflowed objects returned by the given
1305 // iterator. Stop when the marking stack is filled or the end of the space
1306 // is reached, whichever comes first.
1307 template<class T>
1308 static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1309 T* it) {
1310 // The caller should ensure that the marking stack is initially not full,
1311 // so that we don't waste effort pointlessly scanning for objects.
1312 ASSERT(!collector->marking_stack_.is_full());
Steve Blocka7e24c12009-10-30 11:49:00 +00001313
Steve Block44f0eee2011-05-26 01:26:41 +01001314 for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
1315 if (object->IsOverflowed()) {
1316 object->ClearOverflow();
1317 ASSERT(object->IsMarked());
1318 ASSERT(HEAP->Contains(object));
1319 collector->marking_stack_.Push(object);
1320 if (collector->marking_stack_.is_full()) return;
1321 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001322 }
1323 }
Steve Block44f0eee2011-05-26 01:26:41 +01001324};
Steve Blocka7e24c12009-10-30 11:49:00 +00001325
1326
1327bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1328 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
1329}
1330
1331
Steve Blocka7e24c12009-10-30 11:49:00 +00001332void MarkCompactCollector::MarkSymbolTable() {
Ben Murdoch8b112d22011-06-08 16:22:53 +01001333 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
Steve Blocka7e24c12009-10-30 11:49:00 +00001334 // Mark the symbol table itself.
1335 SetMark(symbol_table);
1336 // Explicitly mark the prefix.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001337 MarkingVisitor marker(heap());
Steve Blocka7e24c12009-10-30 11:49:00 +00001338 symbol_table->IteratePrefix(&marker);
Iain Merrick75681382010-08-19 15:07:18 +01001339 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001340}
1341
1342
1343void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1344 // Mark the heap roots including global variables, stack variables,
1345 // etc., and all objects reachable from them.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001346 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
Steve Blocka7e24c12009-10-30 11:49:00 +00001347
1348 // Handle the symbol table specially.
1349 MarkSymbolTable();
1350
1351 // There may be overflowed objects in the heap. Visit them now.
Steve Block44f0eee2011-05-26 01:26:41 +01001352 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001353 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001354 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001355 }
1356}
1357
1358
1359void MarkCompactCollector::MarkObjectGroups() {
Steve Block44f0eee2011-05-26 01:26:41 +01001360 List<ObjectGroup*>* object_groups =
Ben Murdoch8b112d22011-06-08 16:22:53 +01001361 heap()->isolate()->global_handles()->object_groups();
Steve Blocka7e24c12009-10-30 11:49:00 +00001362
Ben Murdoch8b112d22011-06-08 16:22:53 +01001363 int last = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001364 for (int i = 0; i < object_groups->length(); i++) {
1365 ObjectGroup* entry = object_groups->at(i);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001366 ASSERT(entry != NULL);
Steve Blocka7e24c12009-10-30 11:49:00 +00001367
Ben Murdoch8b112d22011-06-08 16:22:53 +01001368 Object*** objects = entry->objects_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001369 bool group_marked = false;
Ben Murdoch8b112d22011-06-08 16:22:53 +01001370 for (size_t j = 0; j < entry->length_; j++) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001371 Object* object = *objects[j];
1372 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1373 group_marked = true;
1374 break;
1375 }
1376 }
1377
Ben Murdoch8b112d22011-06-08 16:22:53 +01001378 if (!group_marked) {
1379 (*object_groups)[last++] = entry;
1380 continue;
1381 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001382
Ben Murdoch8b112d22011-06-08 16:22:53 +01001383 // An object in the group is marked, so mark all heap objects in
1384 // the group.
1385 for (size_t j = 0; j < entry->length_; ++j) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001386 if ((*objects[j])->IsHeapObject()) {
1387 MarkObject(HeapObject::cast(*objects[j]));
1388 }
1389 }
Steve Block44f0eee2011-05-26 01:26:41 +01001390
Ben Murdoch8b112d22011-06-08 16:22:53 +01001391 // Once the entire group has been marked, dispose it because it's
1392 // not needed anymore.
1393 entry->Dispose();
Steve Blocka7e24c12009-10-30 11:49:00 +00001394 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01001395 object_groups->Rewind(last);
Steve Blocka7e24c12009-10-30 11:49:00 +00001396}
1397
1398
Steve Block44f0eee2011-05-26 01:26:41 +01001399void MarkCompactCollector::MarkImplicitRefGroups() {
1400 List<ImplicitRefGroup*>* ref_groups =
Ben Murdoch8b112d22011-06-08 16:22:53 +01001401 heap()->isolate()->global_handles()->implicit_ref_groups();
Steve Block44f0eee2011-05-26 01:26:41 +01001402
Ben Murdoch8b112d22011-06-08 16:22:53 +01001403 int last = 0;
Steve Block44f0eee2011-05-26 01:26:41 +01001404 for (int i = 0; i < ref_groups->length(); i++) {
1405 ImplicitRefGroup* entry = ref_groups->at(i);
Ben Murdoch8b112d22011-06-08 16:22:53 +01001406 ASSERT(entry != NULL);
Steve Block44f0eee2011-05-26 01:26:41 +01001407
Ben Murdoch8b112d22011-06-08 16:22:53 +01001408 if (!(*entry->parent_)->IsMarked()) {
1409 (*ref_groups)[last++] = entry;
1410 continue;
1411 }
Steve Block44f0eee2011-05-26 01:26:41 +01001412
Ben Murdoch8b112d22011-06-08 16:22:53 +01001413 Object*** children = entry->children_;
1414 // A parent object is marked, so mark all child heap objects.
1415 for (size_t j = 0; j < entry->length_; ++j) {
Steve Block44f0eee2011-05-26 01:26:41 +01001416 if ((*children[j])->IsHeapObject()) {
1417 MarkObject(HeapObject::cast(*children[j]));
1418 }
1419 }
1420
Ben Murdoch8b112d22011-06-08 16:22:53 +01001421 // Once the entire group has been marked, dispose it because it's
1422 // not needed anymore.
1423 entry->Dispose();
Steve Block44f0eee2011-05-26 01:26:41 +01001424 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01001425 ref_groups->Rewind(last);
Steve Block44f0eee2011-05-26 01:26:41 +01001426}
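
// Both MarkObjectGroups() and MarkImplicitRefGroups() above compact their list
// in place with the same retain-and-rewind idiom: entries that must be looked
// at again on a later marking round are copied down to index 'last', handled
// entries are disposed, and the list is truncated afterwards. A standalone
// sketch of that pattern (Handled() and Handle() are hypothetical stand-ins):
//
//   int last = 0;
//   for (int i = 0; i < groups->length(); i++) {
//     Group* entry = groups->at(i);
//     if (!Handled(entry)) {
//       (*groups)[last++] = entry;   // Retain it for a later round.
//       continue;
//     }
//     Handle(entry);
//     entry->Dispose();              // Handled entries are not kept.
//   }
//   groups->Rewind(last);            // Drop everything past the retained prefix.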
1427
1428
Steve Blocka7e24c12009-10-30 11:49:00 +00001429// Mark all objects reachable from the objects on the marking stack.
1430// Before: the marking stack contains zero or more heap object pointers.
1431// After: the marking stack is empty, and all objects reachable from the
1432// marking stack have been marked, or are overflowed in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001433void MarkCompactCollector::EmptyMarkingStack() {
Steve Block44f0eee2011-05-26 01:26:41 +01001434 while (!marking_stack_.is_empty()) {
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001435 while (!marking_stack_.is_empty()) {
1436 HeapObject* object = marking_stack_.Pop();
1437 ASSERT(object->IsHeapObject());
1438 ASSERT(heap()->Contains(object));
1439 ASSERT(object->IsMarked());
1440 ASSERT(!object->IsOverflowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00001441
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001442 // Because the object is marked, we have to recover the original map
1443 // pointer and use it to mark the object's body.
1444 MapWord map_word = object->map_word();
1445 map_word.ClearMark();
1446 Map* map = map_word.ToMap();
1447 MarkObject(map);
Iain Merrick75681382010-08-19 15:07:18 +01001448
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001449 StaticMarkingVisitor::IterateBody(map, object);
1450 }
1451
1452 // Process encountered weak maps, mark objects only reachable by those
1453 // weak maps and repeat until fix-point is reached.
1454 ProcessWeakMaps();
Steve Blocka7e24c12009-10-30 11:49:00 +00001455 }
1456}
1457
1458
1459// Sweep the heap for overflowed objects, clear their overflow bits, and
1460// push them on the marking stack. Stop early if the marking stack fills
1461// before sweeping completes. If sweeping completes, there are no remaining
1462// overflowed objects in the heap so the overflow flag on the marking stack
1463// is cleared.
1464void MarkCompactCollector::RefillMarkingStack() {
Steve Block44f0eee2011-05-26 01:26:41 +01001465 ASSERT(marking_stack_.overflowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00001466
Ben Murdoch8b112d22011-06-08 16:22:53 +01001467 SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001468 OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
1469 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001470
Ben Murdoch8b112d22011-06-08 16:22:53 +01001471 HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
Steve Blocka7e24c12009-10-30 11:49:00 +00001472 &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001473 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
1474 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001475
Ben Murdoch8b112d22011-06-08 16:22:53 +01001476 HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001477 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
1478 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001479
Ben Murdoch8b112d22011-06-08 16:22:53 +01001480 HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001481 OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
1482 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001483
Ben Murdoch8b112d22011-06-08 16:22:53 +01001484 HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001485 OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
1486 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001487
Ben Murdoch8b112d22011-06-08 16:22:53 +01001488 HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001489 OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
1490 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001491
Ben Murdoch8b112d22011-06-08 16:22:53 +01001492 LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
Steve Block44f0eee2011-05-26 01:26:41 +01001493 OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
1494 if (marking_stack_.is_full()) return;
Steve Blocka7e24c12009-10-30 11:49:00 +00001495
Steve Block44f0eee2011-05-26 01:26:41 +01001496 marking_stack_.clear_overflowed();
Steve Blocka7e24c12009-10-30 11:49:00 +00001497}
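
// RefillMarkingStack() above is the recovery half of the marking stack's
// overflow protocol. The other half, sketched here with illustrative names
// (not the real method names), runs when a push finds the stack full: the
// object keeps its mark bit, but it is flagged as overflowed instead of being
// pushed, and the stack records that a refill pass is needed.
//
//   void PushOrOverflow(MarkingStack* stack, HeapObject* object) {
//     if (!stack->is_full()) {
//       stack->Push(object);
//     } else {
//       object->SetOverflow();    // Remembered in the object itself...
//       stack->set_overflowed();  // ...and forces RefillMarkingStack() later.
//     }
//   }
//
// ScanOverflowedObjects() then finds such objects by sweeping each space,
// clears their overflow bit, and pushes them back onto the stack.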
1498
1499
1500// Mark all objects reachable (transitively) from objects on the marking
1501// stack. Before: the marking stack contains zero or more heap object
1502// pointers. After: the marking stack is empty and there are no overflowed
1503// objects in the heap.
Iain Merrick75681382010-08-19 15:07:18 +01001504void MarkCompactCollector::ProcessMarkingStack() {
1505 EmptyMarkingStack();
Steve Block44f0eee2011-05-26 01:26:41 +01001506 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001507 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001508 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001509 }
1510}
1511
1512
Steve Block44f0eee2011-05-26 01:26:41 +01001513void MarkCompactCollector::ProcessExternalMarking() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001514 bool work_to_do = true;
Steve Block44f0eee2011-05-26 01:26:41 +01001515 ASSERT(marking_stack_.is_empty());
Steve Blocka7e24c12009-10-30 11:49:00 +00001516 while (work_to_do) {
1517 MarkObjectGroups();
Steve Block44f0eee2011-05-26 01:26:41 +01001518 MarkImplicitRefGroups();
1519 work_to_do = !marking_stack_.is_empty();
Iain Merrick75681382010-08-19 15:07:18 +01001520 ProcessMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001521 }
1522}
1523
1524
1525void MarkCompactCollector::MarkLiveObjects() {
Leon Clarkef7060e22010-06-03 12:02:55 +01001526 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
Ben Murdochb0fe1622011-05-05 13:52:32 +01001527 // The recursive GC marker detects when it is nearing stack overflow,
1528 // and switches to a different marking system. JS interrupts interfere
1529 // with the C stack limit check.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001530 PostponeInterruptsScope postpone(heap()->isolate());
Ben Murdochb0fe1622011-05-05 13:52:32 +01001531
Steve Blocka7e24c12009-10-30 11:49:00 +00001532#ifdef DEBUG
1533 ASSERT(state_ == PREPARE_GC);
1534 state_ = MARK_LIVE_OBJECTS;
1535#endif
1536 // The to space contains live objects, the from space is used as a marking
1537 // stack.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001538 marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
1539 heap()->new_space()->FromSpaceHigh());
Steve Blocka7e24c12009-10-30 11:49:00 +00001540
Steve Block44f0eee2011-05-26 01:26:41 +01001541 ASSERT(!marking_stack_.overflowed());
Steve Blocka7e24c12009-10-30 11:49:00 +00001542
Iain Merrick75681382010-08-19 15:07:18 +01001543 PrepareForCodeFlushing();
1544
Ben Murdoch8b112d22011-06-08 16:22:53 +01001545 RootMarkingVisitor root_visitor(heap());
Steve Blocka7e24c12009-10-30 11:49:00 +00001546 MarkRoots(&root_visitor);
1547
1548 // The objects reachable from the roots are marked, yet unreachable
Steve Block44f0eee2011-05-26 01:26:41 +01001549 // objects are unmarked. Mark objects reachable due to host
1550 // application specific logic.
1551 ProcessExternalMarking();
Steve Blocka7e24c12009-10-30 11:49:00 +00001552
1553 // The objects reachable from the roots or object groups are marked,
1554 // yet unreachable objects are unmarked. Mark objects reachable
1555 // only from weak global handles.
1556 //
1557 // First we identify nonlive weak handles and mark them as pending
1558 // destruction.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001559 heap()->isolate()->global_handles()->IdentifyWeakHandles(
Steve Block44f0eee2011-05-26 01:26:41 +01001560 &IsUnmarkedHeapObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00001561 // Then we mark the objects and process the transitive closure.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001562 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
Steve Block44f0eee2011-05-26 01:26:41 +01001563 while (marking_stack_.overflowed()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001564 RefillMarkingStack();
Iain Merrick75681382010-08-19 15:07:18 +01001565 EmptyMarkingStack();
Steve Blocka7e24c12009-10-30 11:49:00 +00001566 }
1567
Steve Block44f0eee2011-05-26 01:26:41 +01001568 // Repeat host application specific marking to mark unmarked objects
1569 // reachable from the weak roots.
1570 ProcessExternalMarking();
Steve Blocka7e24c12009-10-30 11:49:00 +00001571
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001572 // Object literal map caches reference symbols (cache keys) and maps
1573  // (cache values). At this point, maps that are still useful have already
1574  // been marked. Mark the keys for the live values before we process the
1575 // symbol table.
1576 ProcessMapCaches();
1577
Steve Blocka7e24c12009-10-30 11:49:00 +00001578 // Prune the symbol table removing all symbols only pointed to by the
1579 // symbol table. Cannot use symbol_table() here because the symbol
1580 // table is marked.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001581 SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
1582 SymbolTableCleaner v(heap());
Steve Blocka7e24c12009-10-30 11:49:00 +00001583 symbol_table->IterateElements(&v);
1584 symbol_table->ElementsRemoved(v.PointersRemoved());
Ben Murdoch8b112d22011-06-08 16:22:53 +01001585 heap()->external_string_table_.Iterate(&v);
1586 heap()->external_string_table_.CleanUp();
Steve Blocka7e24c12009-10-30 11:49:00 +00001587
Ben Murdochf87a2032010-10-22 12:50:53 +01001588 // Process the weak references.
1589 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
Ben Murdoch8b112d22011-06-08 16:22:53 +01001590 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
Ben Murdochf87a2032010-10-22 12:50:53 +01001591
Steve Blocka7e24c12009-10-30 11:49:00 +00001592 // Remove object groups after marking phase.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001593 heap()->isolate()->global_handles()->RemoveObjectGroups();
1594 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
Ben Murdochb0fe1622011-05-05 13:52:32 +01001595
1596 // Flush code from collected candidates.
Steve Block44f0eee2011-05-26 01:26:41 +01001597 if (is_code_flushing_enabled()) {
1598 code_flusher_->ProcessCandidates();
1599 }
Ben Murdoche0cee9b2011-05-25 10:26:03 +01001600
1601 // Clean up dead objects from the runtime profiler.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001602 heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
Steve Blocka7e24c12009-10-30 11:49:00 +00001603}
1604
1605
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001606void MarkCompactCollector::ProcessMapCaches() {
1607 Object* raw_context = heap()->global_contexts_list_;
1608 while (raw_context != heap()->undefined_value()) {
1609 Context* context = reinterpret_cast<Context*>(raw_context);
1610 if (context->IsMarked()) {
1611 HeapObject* raw_map_cache =
1612 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
1613 // A map cache may be reachable from the stack. In this case
1614 // it's already transitively marked and it's too late to clean
1615 // up its parts.
1616 if (!raw_map_cache->IsMarked() &&
1617 raw_map_cache != heap()->undefined_value()) {
1618 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
1619 int existing_elements = map_cache->NumberOfElements();
1620 int used_elements = 0;
1621 for (int i = MapCache::kElementsStartIndex;
1622 i < map_cache->length();
1623 i += MapCache::kEntrySize) {
1624 Object* raw_key = map_cache->get(i);
1625 if (raw_key == heap()->undefined_value() ||
1626 raw_key == heap()->null_value()) continue;
1627 STATIC_ASSERT(MapCache::kEntrySize == 2);
1628 Object* raw_map = map_cache->get(i + 1);
1629 if (raw_map->IsHeapObject() &&
1630 HeapObject::cast(raw_map)->IsMarked()) {
1631 ++used_elements;
1632 } else {
1633 // Delete useless entries with unmarked maps.
1634 ASSERT(raw_map->IsMap());
1635 map_cache->set_null_unchecked(heap(), i);
1636 map_cache->set_null_unchecked(heap(), i + 1);
1637 }
1638 }
1639 if (used_elements == 0) {
1640 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
1641 } else {
1642 // Note: we don't actually shrink the cache here to avoid
1643 // extra complexity during GC. We rely on subsequent cache
1644 // usages (EnsureCapacity) to do this.
1645 map_cache->ElementsRemoved(existing_elements - used_elements);
1646 MarkObject(map_cache);
1647 }
1648 }
1649 }
1650 // Move to next element in the list.
1651 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
1652 }
1653 ProcessMarkingStack();
1654}
1655
1656
Steve Blocka7e24c12009-10-30 11:49:00 +00001657#ifdef DEBUG
1658void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
1659 live_bytes_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001660 if (heap()->new_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001661 live_young_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001662 } else if (heap()->map_space()->Contains(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001663 ASSERT(obj->IsMap());
Steve Block6ded16b2010-05-10 14:33:55 +01001664 live_map_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001665 } else if (heap()->cell_space()->Contains(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001666 ASSERT(obj->IsJSGlobalPropertyCell());
Steve Block6ded16b2010-05-10 14:33:55 +01001667 live_cell_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001668 } else if (heap()->old_pointer_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001669 live_old_pointer_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001670 } else if (heap()->old_data_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001671 live_old_data_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001672 } else if (heap()->code_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001673 live_code_objects_size_ += obj->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01001674 } else if (heap()->lo_space()->Contains(obj)) {
Steve Block6ded16b2010-05-10 14:33:55 +01001675 live_lo_objects_size_ += obj->Size();
Steve Blocka7e24c12009-10-30 11:49:00 +00001676 } else {
1677 UNREACHABLE();
1678 }
1679}
1680#endif // DEBUG
1681
1682
1683void MarkCompactCollector::SweepLargeObjectSpace() {
1684#ifdef DEBUG
1685 ASSERT(state_ == MARK_LIVE_OBJECTS);
1686 state_ =
1687 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1688#endif
1689 // Deallocate unmarked objects and clear marked bits for marked objects.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001690 heap()->lo_space()->FreeUnmarkedObjects();
Steve Blocka7e24c12009-10-30 11:49:00 +00001691}
1692
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001693
Steve Blocka7e24c12009-10-30 11:49:00 +00001694// Safe to use during marking phase only.
1695bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1696 MapWord metamap = object->map_word();
1697 metamap.ClearMark();
1698 return metamap.ToMap()->instance_type() == MAP_TYPE;
1699}
1700
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001701
Steve Blocka7e24c12009-10-30 11:49:00 +00001702void MarkCompactCollector::ClearNonLiveTransitions() {
Steve Block053d10c2011-06-13 19:13:29 +01001703 HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00001704 // Iterate over the map space, setting map transitions that go from
1705 // a marked map to an unmarked map to null transitions. At the same time,
1706 // set all the prototype fields of maps back to their original value,
1707 // dropping the back pointers temporarily stored in the prototype field.
1708 // Setting the prototype field requires following the linked list of
1709 // back pointers, reversing them all at once. This allows us to find
1710 // those maps with map transitions that need to be nulled, and only
1711 // scan the descriptor arrays of those maps, not all maps.
Leon Clarkee46be812010-01-19 14:06:41 +00001712 // All of these actions are carried out only on maps of JSObjects
Steve Blocka7e24c12009-10-30 11:49:00 +00001713 // and related subtypes.
Leon Clarked91b9f72010-01-27 17:25:45 +00001714 for (HeapObject* obj = map_iterator.next();
1715 obj != NULL; obj = map_iterator.next()) {
1716 Map* map = reinterpret_cast<Map*>(obj);
Steve Blocka7e24c12009-10-30 11:49:00 +00001717 if (!map->IsMarked() && map->IsByteArray()) continue;
1718
1719 ASSERT(SafeIsMap(map));
1720 // Only JSObject and subtypes have map transitions and back pointers.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001721 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1722 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
Kristian Monsen0d5e1162010-09-30 15:31:59 +01001723
1724 if (map->IsMarked() && map->attached_to_shared_function_info()) {
1725 // This map is used for inobject slack tracking and has been detached
1726 // from SharedFunctionInfo during the mark phase.
1727 // Since it survived the GC, reattach it now.
1728 map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
1729 }
1730
Steve Block053d10c2011-06-13 19:13:29 +01001731 // Clear dead prototype transitions.
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001732 int number_of_transitions = map->NumberOfProtoTransitions();
1733 if (number_of_transitions > 0) {
1734 FixedArray* prototype_transitions =
1735 map->unchecked_prototype_transitions();
1736 int new_number_of_transitions = 0;
1737 const int header = Map::kProtoTransitionHeaderSize;
1738 const int proto_offset =
1739 header + Map::kProtoTransitionPrototypeOffset;
1740 const int map_offset = header + Map::kProtoTransitionMapOffset;
1741 const int step = Map::kProtoTransitionElementsPerEntry;
1742 for (int i = 0; i < number_of_transitions; i++) {
1743 Object* prototype = prototype_transitions->get(proto_offset + i * step);
1744 Object* cached_map = prototype_transitions->get(map_offset + i * step);
Steve Block053d10c2011-06-13 19:13:29 +01001745 if (HeapObject::cast(prototype)->IsMarked() &&
1746 HeapObject::cast(cached_map)->IsMarked()) {
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001747 if (new_number_of_transitions != i) {
1748 prototype_transitions->set_unchecked(
1749 heap_,
1750 proto_offset + new_number_of_transitions * step,
1751 prototype,
1752 UPDATE_WRITE_BARRIER);
1753 prototype_transitions->set_unchecked(
1754 heap_,
1755 map_offset + new_number_of_transitions * step,
1756 cached_map,
1757 SKIP_WRITE_BARRIER);
Steve Block053d10c2011-06-13 19:13:29 +01001758 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001759 new_number_of_transitions++;
Steve Block053d10c2011-06-13 19:13:29 +01001760 }
1761 }
1762
1763 // Fill slots that became free with undefined value.
1764 Object* undefined = heap()->raw_unchecked_undefined_value();
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001765 for (int i = new_number_of_transitions * step;
1766 i < number_of_transitions * step;
1767 i++) {
Steve Block053d10c2011-06-13 19:13:29 +01001768 prototype_transitions->set_unchecked(heap_,
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001769 header + i,
Steve Block053d10c2011-06-13 19:13:29 +01001770 undefined,
1771 SKIP_WRITE_BARRIER);
1772 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00001773 map->SetNumberOfProtoTransitions(new_number_of_transitions);
Steve Block053d10c2011-06-13 19:13:29 +01001774 }
1775
Steve Blocka7e24c12009-10-30 11:49:00 +00001776 // Follow the chain of back pointers to find the prototype.
1777 Map* current = map;
1778 while (SafeIsMap(current)) {
1779 current = reinterpret_cast<Map*>(current->prototype());
1780 ASSERT(current->IsHeapObject());
1781 }
1782 Object* real_prototype = current;
1783
1784 // Follow back pointers, setting them to prototype,
1785 // clearing map transitions when necessary.
1786 current = map;
1787 bool on_dead_path = !current->IsMarked();
1788 Object* next;
1789 while (SafeIsMap(current)) {
1790 next = current->prototype();
1791 // There should never be a dead map above a live map.
1792 ASSERT(on_dead_path || current->IsMarked());
1793
1794 // A live map above a dead map indicates a dead transition.
1795 // This test will always be false on the first iteration.
1796 if (on_dead_path && current->IsMarked()) {
1797 on_dead_path = false;
Ben Murdoch8b112d22011-06-08 16:22:53 +01001798 current->ClearNonLiveTransitions(heap(), real_prototype);
Steve Blocka7e24c12009-10-30 11:49:00 +00001799 }
1800 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1801 real_prototype;
1802 current = reinterpret_cast<Map*>(next);
1803 }
1804 }
1805}
1806
Ben Murdoch69a99ed2011-11-30 16:03:39 +00001807
1808void MarkCompactCollector::ProcessWeakMaps() {
1809 Object* weak_map_obj = encountered_weak_maps();
1810 while (weak_map_obj != Smi::FromInt(0)) {
1811 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1812 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1813 ObjectHashTable* table = weak_map->unchecked_table();
1814 for (int i = 0; i < table->Capacity(); i++) {
1815 if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1816 Object* value = table->get(table->EntryToValueIndex(i));
1817 StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
1818 table->set_unchecked(heap(),
1819 table->EntryToValueIndex(i),
1820 value,
1821 UPDATE_WRITE_BARRIER);
1822 }
1823 }
1824 weak_map_obj = weak_map->next();
1825 }
1826}
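
// ProcessWeakMaps() above is the marking half of an ephemeron-style fix-point:
// a weak map value is only marked once its key is marked, and marking that
// value may in turn make further keys reachable. EmptyMarkingStack() therefore
// alternates between draining the stack and re-running ProcessWeakMaps() until
// no new work appears. Schematically (illustrative only; DrainStack() stands
// in for the inner loop of EmptyMarkingStack()):
//
//   do {
//     DrainStack();         // Mark everything reachable from pushed objects.
//     ProcessWeakMaps();    // May push values whose keys just became marked.
//   } while (!marking_stack_.is_empty());
//
// ClearWeakMaps() below then removes the entries whose keys never got marked.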
1827
1828
1829void MarkCompactCollector::ClearWeakMaps() {
1830 Object* weak_map_obj = encountered_weak_maps();
1831 while (weak_map_obj != Smi::FromInt(0)) {
1832 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1833 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1834 ObjectHashTable* table = weak_map->unchecked_table();
1835 for (int i = 0; i < table->Capacity(); i++) {
1836 if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1837 table->RemoveEntry(i, heap());
1838 }
1839 }
1840 weak_map_obj = weak_map->next();
1841 weak_map->set_next(Smi::FromInt(0));
1842 }
1843 set_encountered_weak_maps(Smi::FromInt(0));
1844}
1845
Steve Blocka7e24c12009-10-30 11:49:00 +00001846// -------------------------------------------------------------------------
1847// Phase 2: Encode forwarding addresses.
1848// When compacting, forwarding addresses for objects in old space and map
1849// space are encoded in their map pointer word (along with an encoding of
1850// their map pointers).
1851//
Leon Clarkee46be812010-01-19 14:06:41 +00001852// The exact encoding is described in the comments for class MapWord in
1853// objects.h.
Steve Blocka7e24c12009-10-30 11:49:00 +00001854//
1855// An address range [start, end) can have both live and non-live objects.
1856// Maximal non-live regions are marked so they can be skipped on subsequent
1857// sweeps of the heap. A distinguished map-pointer encoding is used to mark
1858// free regions of one-word size (in which case the next word is the start
1859// of a live object). A second distinguished map-pointer encoding is used
1860// to mark free regions larger than one word, and the size of the free
1861// region (including the first word) is written to the second word of the
1862// region.
1863//
1864// Any valid map page offset must lie in the object area of the page, so map
1865// page offsets less than Page::kObjectStartOffset are invalid. We use a
1866// pair of distinguished invalid map encodings (for single word and multiple
1867// words) to indicate free regions in the page found during computation of
1868// forwarding addresses and skipped over in subsequent sweeps.
Steve Blocka7e24c12009-10-30 11:49:00 +00001869
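// A later sweep walking such a page can tell live objects apart from the two
// free-region encodings written by EncodeFreeRegion() below. An illustrative
// sketch of that decoding (simplified; the real consumers live elsewhere):
//
//   uint32_t first_word = Memory::uint32_at(current);
//   if (first_word == MarkCompactCollector::kSingleFreeEncoding) {
//     current += kIntSize;                             // One-word free region.
//   } else if (first_word == MarkCompactCollector::kMultiFreeEncoding) {
//     current += Memory::int_at(current + kIntSize);   // Size is in 2nd word.
//   } else {
//     // An encoded map word: a live object starts here.
//   }
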
1870
1871// Encode a free region, defined by the given start address and size, in the
1872// first word or two of the region.
1873void EncodeFreeRegion(Address free_start, int free_size) {
1874 ASSERT(free_size >= kIntSize);
1875 if (free_size == kIntSize) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001876 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001877 } else {
1878 ASSERT(free_size >= 2 * kIntSize);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001879 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 Memory::int_at(free_start + kIntSize) = free_size;
1881 }
1882
1883#ifdef DEBUG
1884 // Zap the body of the free region.
1885 if (FLAG_enable_slow_asserts) {
1886 for (int offset = 2 * kIntSize;
1887 offset < free_size;
1888 offset += kPointerSize) {
1889 Memory::Address_at(free_start + offset) = kZapValue;
1890 }
1891 }
1892#endif
1893}
1894
1895
1896// Try to promote all objects in new space. Heap numbers and sequential
1897// strings are promoted to the old data space, large objects to the large
1898// object space, and all others to the old pointer space.
Steve Block44f0eee2011-05-26 01:26:41 +01001899inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1900 HeapObject* object,
John Reck59135872010-11-02 12:39:01 -07001901 int object_size) {
1902 MaybeObject* forwarded;
Steve Block44f0eee2011-05-26 01:26:41 +01001903 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 forwarded = Failure::Exception();
1905 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01001906 OldSpace* target_space = heap->TargetSpace(object);
1907 ASSERT(target_space == heap->old_pointer_space() ||
1908 target_space == heap->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00001909 forwarded = target_space->MCAllocateRaw(object_size);
1910 }
John Reck59135872010-11-02 12:39:01 -07001911 Object* result;
1912 if (!forwarded->ToObject(&result)) {
Steve Block44f0eee2011-05-26 01:26:41 +01001913 result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
Steve Blocka7e24c12009-10-30 11:49:00 +00001914 }
John Reck59135872010-11-02 12:39:01 -07001915 return result;
Steve Blocka7e24c12009-10-30 11:49:00 +00001916}
1917
1918
1919// Allocation functions for the paged spaces call the space's MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001920MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001921 Heap *heap,
John Reck59135872010-11-02 12:39:01 -07001922 HeapObject* ignore,
1923 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001924 return heap->old_pointer_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001925}
1926
1927
John Reck59135872010-11-02 12:39:01 -07001928MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001929 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001930 HeapObject* ignore,
1931 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001932 return heap->old_data_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001933}
1934
1935
John Reck59135872010-11-02 12:39:01 -07001936MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001937 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001938 HeapObject* ignore,
1939 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001940 return heap->code_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001941}
1942
1943
John Reck59135872010-11-02 12:39:01 -07001944MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
Steve Block44f0eee2011-05-26 01:26:41 +01001945 Heap* heap,
John Reck59135872010-11-02 12:39:01 -07001946 HeapObject* ignore,
1947 int object_size) {
Steve Block44f0eee2011-05-26 01:26:41 +01001948 return heap->map_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001949}
1950
1951
Steve Block44f0eee2011-05-26 01:26:41 +01001952MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1953 Heap* heap, HeapObject* ignore, int object_size) {
1954 return heap->cell_space()->MCAllocateRaw(object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001955}
1956
1957
1958// The forwarding address is encoded at the same offset as the current
1959// to-space object, but in from space.
Steve Block44f0eee2011-05-26 01:26:41 +01001960inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1961 HeapObject* old_object,
Steve Blocka7e24c12009-10-30 11:49:00 +00001962 int object_size,
1963 Object* new_object,
1964 int* ignored) {
1965 int offset =
Steve Block44f0eee2011-05-26 01:26:41 +01001966 heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1967 Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
Steve Blocka7e24c12009-10-30 11:49:00 +00001968 HeapObject::cast(new_object)->address();
1969}
1970
1971
1972// The forwarding address is encoded in the map pointer of the object as an
1973// offset (in terms of live bytes) from the address of the first live object
1974// in the page.
Steve Block44f0eee2011-05-26 01:26:41 +01001975inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1976 HeapObject* old_object,
Steve Blocka7e24c12009-10-30 11:49:00 +00001977 int object_size,
1978 Object* new_object,
1979 int* offset) {
1980 // Record the forwarding address of the first live object if necessary.
1981 if (*offset == 0) {
1982 Page::FromAddress(old_object->address())->mc_first_forwarded =
1983 HeapObject::cast(new_object)->address();
1984 }
1985
1986 MapWord encoding =
1987 MapWord::EncodeAddress(old_object->map()->address(), *offset);
1988 old_object->set_map_word(encoding);
1989 *offset += object_size;
1990 ASSERT(*offset <= Page::kObjectAreaSize);
1991}
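
// Later phases recover the destination of a relocated object from the two
// pieces stored above, roughly as follows (an illustrative note only; the
// exact decoding accessors live in MapWord, objects.h):
//
//   Page* page = Page::FromAddress(old_object->address());
//   Address new_address = page->mc_first_forwarded + offset_from_map_word;
//
// where offset_from_map_word is the live-byte offset that EncodeAddress()
// packed into the object's map word.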
1992
1993
1994// Most non-live objects are ignored.
Ben Murdoch8b112d22011-06-08 16:22:53 +01001995inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001996
1997
Steve Blocka7e24c12009-10-30 11:49:00 +00001998// Function template that, given a range of addresses (e.g., a semispace or a
1999// paged space page), iterates through the objects in the range to clear
2000// mark bits and compute and encode forwarding addresses. As a side effect,
2001// maximal free chunks are marked so that they can be skipped on subsequent
2002// sweeps.
2003//
2004// The template parameters are an allocation function, a forwarding address
2005// encoding function, and a function to process non-live objects.
2006template<MarkCompactCollector::AllocationFunction Alloc,
2007 MarkCompactCollector::EncodingFunction Encode,
2008 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
Steve Block44f0eee2011-05-26 01:26:41 +01002009inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
2010 Address start,
Steve Blocka7e24c12009-10-30 11:49:00 +00002011 Address end,
2012 int* offset) {
2013 // The start address of the current free region while sweeping the space.
2014 // This address is set when a transition from live to non-live objects is
2015 // encountered. A value (an encoding of the 'next free region' pointer)
2016 // is written to memory at this address when a transition from non-live to
2017 // live objects is encountered.
2018 Address free_start = NULL;
2019
2020 // A flag giving the state of the previously swept object. Initially true
2021 // to ensure that free_start is initialized to a proper address before
2022 // trying to write to it.
2023 bool is_prev_alive = true;
2024
2025 int object_size; // Will be set on each iteration of the loop.
2026 for (Address current = start; current < end; current += object_size) {
2027 HeapObject* object = HeapObject::FromAddress(current);
2028 if (object->IsMarked()) {
2029 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01002030 collector->tracer()->decrement_marked_count();
Steve Blocka7e24c12009-10-30 11:49:00 +00002031 object_size = object->Size();
2032
Steve Block44f0eee2011-05-26 01:26:41 +01002033 Object* forwarded =
2034 Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
2035 Encode(collector->heap(), object, object_size, forwarded, offset);
Steve Blocka7e24c12009-10-30 11:49:00 +00002036
2037#ifdef DEBUG
2038 if (FLAG_gc_verbose) {
2039 PrintF("forward %p -> %p.\n", object->address(),
2040 HeapObject::cast(forwarded)->address());
2041 }
2042#endif
2043 if (!is_prev_alive) { // Transition from non-live to live.
Steve Blockd0582a62009-12-15 09:54:21 +00002044 EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
Steve Blocka7e24c12009-10-30 11:49:00 +00002045 is_prev_alive = true;
2046 }
2047 } else { // Non-live object.
2048 object_size = object->Size();
Ben Murdoch8b112d22011-06-08 16:22:53 +01002049 ProcessNonLive(object, collector->heap()->isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00002050 if (is_prev_alive) { // Transition from live to non-live.
2051 free_start = current;
2052 is_prev_alive = false;
2053 }
Steve Block1e0659c2011-05-24 12:43:12 +01002054 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002055 }
2056 }
2057
2058 // If we ended on a free region, mark it.
Steve Blockd0582a62009-12-15 09:54:21 +00002059 if (!is_prev_alive) {
2060 EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
2061 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002062}
2063
2064
2065// Functions to encode the forwarding pointers in each compactable space.
2066void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
2067 int ignored;
2068 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
2069 EncodeForwardingAddressInNewSpace,
2070 IgnoreNonLiveObject>(
Steve Block44f0eee2011-05-26 01:26:41 +01002071 this,
Ben Murdoch8b112d22011-06-08 16:22:53 +01002072 heap()->new_space()->bottom(),
2073 heap()->new_space()->top(),
Steve Blocka7e24c12009-10-30 11:49:00 +00002074 &ignored);
2075}
2076
2077
2078template<MarkCompactCollector::AllocationFunction Alloc,
2079 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
2080void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
2081 PagedSpace* space) {
2082 PageIterator it(space, PageIterator::PAGES_IN_USE);
2083 while (it.has_next()) {
2084 Page* p = it.next();
Steve Block6ded16b2010-05-10 14:33:55 +01002085
Steve Blocka7e24c12009-10-30 11:49:00 +00002086 // The offset of each live object in the page from the first live object
2087 // in the page.
2088 int offset = 0;
2089 EncodeForwardingAddressesInRange<Alloc,
2090 EncodeForwardingAddressInPagedSpace,
2091 ProcessNonLive>(
Steve Block44f0eee2011-05-26 01:26:41 +01002092 this,
Steve Blocka7e24c12009-10-30 11:49:00 +00002093 p->ObjectAreaStart(),
2094 p->AllocationTop(),
2095 &offset);
2096 }
2097}
2098
2099
Steve Block6ded16b2010-05-10 14:33:55 +01002100// We scavenge new space simultaneously with sweeping. This is done in two
2101// passes.
2102// The first pass migrates all live objects from one semispace to the other or
2103// promotes them to old space. The forwarding address is written directly into
2104// the first word of the object without any encoding. If the object is dead,
2105// NULL is written as its forwarding address.
2106// The second pass updates pointers to new space in all spaces. It is possible
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002107// to encounter pointers to dead objects during traversal of dirty regions; we
2108// should clear them to avoid encountering them again during the next
2109// dirty-region iteration.
Steve Block44f0eee2011-05-26 01:26:41 +01002110static void MigrateObject(Heap* heap,
2111 Address dst,
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002112 Address src,
2113 int size,
2114 bool to_old_space) {
2115 if (to_old_space) {
Steve Block44f0eee2011-05-26 01:26:41 +01002116 heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002117 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002118 heap->CopyBlock(dst, src, size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002119 }
Steve Block6ded16b2010-05-10 14:33:55 +01002120
2121 Memory::Address_at(src) = dst;
2122}
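
// The forwarding protocol used by MigrateObject() above and the second pass
// below is deliberately simple. A schematic summary (illustrative only):
//
//   Pass 1, per object in the inactive semispace:
//     live object:  Memory::Address_at(old_addr) = address_of_its_copy;
//     dead object:  Memory::Address_at(old_addr) = NULL;
//
//   Pass 2, per slot p that still points into from space:
//     Address new_addr = Memory::Address_at((*p)->address());
//     *p = (new_addr == NULL) ? NULL : HeapObject::FromAddress(new_addr);
//
// Stale pointers to dead objects can show up during dirty-region traversal
// (see UpdatePointerToNewGen() below), which is why they are cleared rather
// than followed.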
2123
2124
Iain Merrick75681382010-08-19 15:07:18 +01002125class StaticPointersToNewGenUpdatingVisitor : public
2126 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
2127 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002128 static inline void VisitPointer(Heap* heap, Object** p) {
Iain Merrick75681382010-08-19 15:07:18 +01002129 if (!(*p)->IsHeapObject()) return;
2130
2131 HeapObject* obj = HeapObject::cast(*p);
2132 Address old_addr = obj->address();
2133
Steve Block44f0eee2011-05-26 01:26:41 +01002134 if (heap->new_space()->Contains(obj)) {
2135 ASSERT(heap->InFromSpace(*p));
Iain Merrick75681382010-08-19 15:07:18 +01002136 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
2137 }
2138 }
2139};
2140
2141
Steve Block6ded16b2010-05-10 14:33:55 +01002142// Visitor for updating pointers from live objects in old spaces to new space.
2143// It does not expect to encounter pointers to dead objects.
2144class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
2145 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002146 explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }
2147
Steve Block6ded16b2010-05-10 14:33:55 +01002148 void VisitPointer(Object** p) {
Steve Block44f0eee2011-05-26 01:26:41 +01002149 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
Steve Block6ded16b2010-05-10 14:33:55 +01002150 }
2151
2152 void VisitPointers(Object** start, Object** end) {
Iain Merrick75681382010-08-19 15:07:18 +01002153 for (Object** p = start; p < end; p++) {
Steve Block44f0eee2011-05-26 01:26:41 +01002154 StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
Iain Merrick75681382010-08-19 15:07:18 +01002155 }
Steve Block6ded16b2010-05-10 14:33:55 +01002156 }
2157
2158 void VisitCodeTarget(RelocInfo* rinfo) {
2159 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2160 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2161 VisitPointer(&target);
2162 rinfo->set_target_address(Code::cast(target)->instruction_start());
2163 }
2164
2165 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002166 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2167 rinfo->IsPatchedReturnSequence()) ||
2168 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2169 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block6ded16b2010-05-10 14:33:55 +01002170 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2171 VisitPointer(&target);
2172 rinfo->set_call_address(Code::cast(target)->instruction_start());
2173 }
Ben Murdoch3fb3ca82011-12-02 17:19:32 +00002174
Steve Block44f0eee2011-05-26 01:26:41 +01002175 private:
2176 Heap* heap_;
Steve Block6ded16b2010-05-10 14:33:55 +01002177};
2178
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002179
Steve Block6ded16b2010-05-10 14:33:55 +01002180// Visitor for updating pointers from live objects in old spaces to new space.
2181// It can encounter pointers to dead objects in new space when traversing map
2182// space (see comment for MigrateObject).
2183static void UpdatePointerToNewGen(HeapObject** p) {
2184 if (!(*p)->IsHeapObject()) return;
2185
2186 Address old_addr = (*p)->address();
Steve Block44f0eee2011-05-26 01:26:41 +01002187 ASSERT(HEAP->InFromSpace(*p));
Steve Block6ded16b2010-05-10 14:33:55 +01002188
2189 Address new_addr = Memory::Address_at(old_addr);
2190
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002191 if (new_addr == NULL) {
2192 // We encountered pointer to a dead object. Clear it so we will
2193 // not visit it again during next iteration of dirty regions.
2194 *p = NULL;
2195 } else {
2196 *p = HeapObject::FromAddress(new_addr);
2197 }
Steve Block6ded16b2010-05-10 14:33:55 +01002198}
2199
2200
Steve Block44f0eee2011-05-26 01:26:41 +01002201static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2202 Object** p) {
Steve Block6ded16b2010-05-10 14:33:55 +01002203 Address old_addr = HeapObject::cast(*p)->address();
2204 Address new_addr = Memory::Address_at(old_addr);
2205 return String::cast(HeapObject::FromAddress(new_addr));
2206}
2207
2208
Steve Block44f0eee2011-05-26 01:26:41 +01002209static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
Steve Block6ded16b2010-05-10 14:33:55 +01002210 Object* result;
2211
Steve Block44f0eee2011-05-26 01:26:41 +01002212 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
John Reck59135872010-11-02 12:39:01 -07002213 MaybeObject* maybe_result =
Steve Block44f0eee2011-05-26 01:26:41 +01002214 heap->lo_space()->AllocateRawFixedArray(object_size);
John Reck59135872010-11-02 12:39:01 -07002215 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01002216 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002217 MigrateObject(heap, target->address(), object->address(), object_size,
2218 true);
2219 heap->mark_compact_collector()->tracer()->
Leon Clarkef7060e22010-06-03 12:02:55 +01002220 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002221 return true;
2222 }
2223 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002224 OldSpace* target_space = heap->TargetSpace(object);
Steve Block6ded16b2010-05-10 14:33:55 +01002225
Steve Block44f0eee2011-05-26 01:26:41 +01002226 ASSERT(target_space == heap->old_pointer_space() ||
2227 target_space == heap->old_data_space());
John Reck59135872010-11-02 12:39:01 -07002228 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2229 if (maybe_result->ToObject(&result)) {
Steve Block6ded16b2010-05-10 14:33:55 +01002230 HeapObject* target = HeapObject::cast(result);
Steve Block44f0eee2011-05-26 01:26:41 +01002231 MigrateObject(heap,
2232 target->address(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002233 object->address(),
2234 object_size,
Steve Block44f0eee2011-05-26 01:26:41 +01002235 target_space == heap->old_pointer_space());
2236 heap->mark_compact_collector()->tracer()->
Leon Clarkef7060e22010-06-03 12:02:55 +01002237 increment_promoted_objects_size(object_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002238 return true;
2239 }
2240 }
2241
2242 return false;
2243}
2244
2245
Steve Block44f0eee2011-05-26 01:26:41 +01002246static void SweepNewSpace(Heap* heap, NewSpace* space) {
2247 heap->CheckNewSpaceExpansionCriteria();
Steve Block6ded16b2010-05-10 14:33:55 +01002248
2249 Address from_bottom = space->bottom();
2250 Address from_top = space->top();
2251
2252 // Flip the semispaces. After flipping, to space is empty, from space has
2253 // live objects.
2254 space->Flip();
2255 space->ResetAllocationInfo();
2256
2257 int size = 0;
2258 int survivors_size = 0;
2259
2260 // First pass: traverse all objects in inactive semispace, remove marks,
2261 // migrate live objects and write forwarding addresses.
2262 for (Address current = from_bottom; current < from_top; current += size) {
2263 HeapObject* object = HeapObject::FromAddress(current);
2264
2265 if (object->IsMarked()) {
2266 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01002267 heap->mark_compact_collector()->tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01002268
2269 size = object->Size();
2270 survivors_size += size;
2271
2272 // Aggressively promote young survivors to the old space.
Steve Block44f0eee2011-05-26 01:26:41 +01002273 if (TryPromoteObject(heap, object, size)) {
Steve Block6ded16b2010-05-10 14:33:55 +01002274 continue;
2275 }
2276
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002277      // Promotion failed. Just migrate the object to the other semispace.
Steve Block6ded16b2010-05-10 14:33:55 +01002278 // Allocation cannot fail at this point: semispaces are of equal size.
John Reck59135872010-11-02 12:39:01 -07002279 Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
Steve Block6ded16b2010-05-10 14:33:55 +01002280
Steve Block44f0eee2011-05-26 01:26:41 +01002281 MigrateObject(heap,
2282 HeapObject::cast(target)->address(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002283 current,
2284 size,
2285 false);
Steve Block6ded16b2010-05-10 14:33:55 +01002286 } else {
Steve Block1e0659c2011-05-24 12:43:12 +01002287 // Process the dead object before we write a NULL into its header.
2288 LiveObjectList::ProcessNonLive(object);
2289
Steve Block6ded16b2010-05-10 14:33:55 +01002290 size = object->Size();
2291 Memory::Address_at(current) = NULL;
2292 }
2293 }
2294
2295 // Second pass: find pointers to new space and update them.
Steve Block44f0eee2011-05-26 01:26:41 +01002296 PointersToNewGenUpdatingVisitor updating_visitor(heap);
Steve Block6ded16b2010-05-10 14:33:55 +01002297
2298 // Update pointers in to space.
Iain Merrick75681382010-08-19 15:07:18 +01002299 Address current = space->bottom();
2300 while (current < space->top()) {
2301 HeapObject* object = HeapObject::FromAddress(current);
2302 current +=
2303 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
2304 object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002305 }
Steve Block6ded16b2010-05-10 14:33:55 +01002306
2307 // Update roots.
Ben Murdoch257744e2011-11-30 15:57:28 +00002308 heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
Steve Block1e0659c2011-05-24 12:43:12 +01002309 LiveObjectList::IterateElements(&updating_visitor);
Steve Block6ded16b2010-05-10 14:33:55 +01002310
2311 // Update pointers in old spaces.
Steve Block44f0eee2011-05-26 01:26:41 +01002312 heap->IterateDirtyRegions(heap->old_pointer_space(),
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002313 &Heap::IteratePointersInDirtyRegion,
2314 &UpdatePointerToNewGen,
Steve Block44f0eee2011-05-26 01:26:41 +01002315 heap->WATERMARK_SHOULD_BE_VALID);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002316
Steve Block44f0eee2011-05-26 01:26:41 +01002317 heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
Steve Block6ded16b2010-05-10 14:33:55 +01002318
2319 // Update pointers from cells.
Steve Block44f0eee2011-05-26 01:26:41 +01002320 HeapObjectIterator cell_iterator(heap->cell_space());
Steve Block6ded16b2010-05-10 14:33:55 +01002321 for (HeapObject* cell = cell_iterator.next();
2322 cell != NULL;
2323 cell = cell_iterator.next()) {
2324 if (cell->IsJSGlobalPropertyCell()) {
2325 Address value_address =
2326 reinterpret_cast<Address>(cell) +
2327 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
2328 updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
2329 }
2330 }
2331
Ben Murdochf87a2032010-10-22 12:50:53 +01002332  // Update the pointer to the head of the global contexts list.
Steve Block44f0eee2011-05-26 01:26:41 +01002333 updating_visitor.VisitPointer(heap->global_contexts_list_address());
Ben Murdochf87a2032010-10-22 12:50:53 +01002334
Steve Block6ded16b2010-05-10 14:33:55 +01002335 // Update pointers from external string table.
Steve Block44f0eee2011-05-26 01:26:41 +01002336 heap->UpdateNewSpaceReferencesInExternalStringTable(
Steve Block6ded16b2010-05-10 14:33:55 +01002337 &UpdateNewSpaceReferenceInExternalStringTableEntry);
2338
2339 // All pointers were updated. Update auxiliary allocation info.
Steve Block44f0eee2011-05-26 01:26:41 +01002340 heap->IncrementYoungSurvivorsCounter(survivors_size);
Steve Block6ded16b2010-05-10 14:33:55 +01002341 space->set_age_mark(space->top());
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002342
2343 // Update JSFunction pointers from the runtime profiler.
Steve Block44f0eee2011-05-26 01:26:41 +01002344 heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
Steve Blocka7e24c12009-10-30 11:49:00 +00002345}
2346
2347
Steve Block44f0eee2011-05-26 01:26:41 +01002348static void SweepSpace(Heap* heap, PagedSpace* space) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002349 PageIterator it(space, PageIterator::PAGES_IN_USE);
Steve Block6ded16b2010-05-10 14:33:55 +01002350
2351  // During sweeping of a paged space we try to find the longest sequences
2352  // of pages without live objects and free them (instead of putting them on
2353  // the free list).
2354
2355 // Page preceding current.
2356 Page* prev = Page::FromAddress(NULL);
2357
2358 // First empty page in a sequence.
2359 Page* first_empty_page = Page::FromAddress(NULL);
2360
2361 // Page preceding first empty page.
2362 Page* prec_first_empty_page = Page::FromAddress(NULL);
2363
2364  // If the last used page of the space ends with a sequence of dead objects
2365  // we can adjust the allocation top instead of putting this free area into
2366  // the free list. Thus during sweeping we keep track of such areas
2367  // and defer their deallocation until the sweeping of the next page
2368  // is done: if one of the next pages contains live objects we have
2369  // to put such an area into the free list.
2370 Address last_free_start = NULL;
2371 int last_free_size = 0;
2372
Steve Blocka7e24c12009-10-30 11:49:00 +00002373 while (it.has_next()) {
2374 Page* p = it.next();
2375
2376 bool is_previous_alive = true;
2377 Address free_start = NULL;
2378 HeapObject* object;
2379
2380 for (Address current = p->ObjectAreaStart();
2381 current < p->AllocationTop();
2382 current += object->Size()) {
2383 object = HeapObject::FromAddress(current);
2384 if (object->IsMarked()) {
2385 object->ClearMark();
Steve Block44f0eee2011-05-26 01:26:41 +01002386 heap->mark_compact_collector()->tracer()->decrement_marked_count();
Steve Block6ded16b2010-05-10 14:33:55 +01002387
Steve Blocka7e24c12009-10-30 11:49:00 +00002388 if (!is_previous_alive) { // Transition from free to live.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002389 space->DeallocateBlock(free_start,
2390 static_cast<int>(current - free_start),
2391 true);
Steve Blocka7e24c12009-10-30 11:49:00 +00002392 is_previous_alive = true;
2393 }
2394 } else {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002395 heap->mark_compact_collector()->ReportDeleteIfNeeded(
2396 object, heap->isolate());
Steve Blocka7e24c12009-10-30 11:49:00 +00002397 if (is_previous_alive) { // Transition from live to free.
2398 free_start = current;
2399 is_previous_alive = false;
2400 }
Steve Block1e0659c2011-05-24 12:43:12 +01002401 LiveObjectList::ProcessNonLive(object);
Steve Blocka7e24c12009-10-30 11:49:00 +00002402 }
2403 // The object is now unmarked for the call to Size() at the top of the
2404 // loop.
2405 }
2406
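    // A page counts as empty if nothing was ever allocated on it or if every
    // object on it is dead (the free region spans from the page start to the
    // allocation top).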
Steve Block6ded16b2010-05-10 14:33:55 +01002407 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2408 || (!is_previous_alive && free_start == p->ObjectAreaStart());
2409
2410 if (page_is_empty) {
2411 // This page is empty. Check whether we are in the middle of
2412      // a sequence of empty pages and start one if not.
2413 if (!first_empty_page->is_valid()) {
2414 first_empty_page = p;
2415 prec_first_empty_page = prev;
2416 }
2417
2418 if (!is_previous_alive) {
2419 // There are dead objects on this page. Update space accounting stats
2420 // without putting anything into free list.
2421 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2422 if (size_in_bytes > 0) {
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002423 space->DeallocateBlock(free_start, size_in_bytes, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002424 }
2425 }
2426 } else {
2427      // This page is not empty. The sequence of empty pages ended on the previous
2428 // one.
2429 if (first_empty_page->is_valid()) {
2430 space->FreePages(prec_first_empty_page, prev);
2431 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2432 }
2433
2434 // If there is a free ending area on one of the previous pages we have
2435      // to deallocate that area and put it on the free list.
2436 if (last_free_size > 0) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002437 Page::FromAddress(last_free_start)->
2438 SetAllocationWatermark(last_free_start);
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002439 space->DeallocateBlock(last_free_start, last_free_size, true);
Steve Block6ded16b2010-05-10 14:33:55 +01002440 last_free_start = NULL;
2441 last_free_size = 0;
2442 }
2443
2444 // If the last region of this page was not live we remember it.
2445 if (!is_previous_alive) {
2446 ASSERT(last_free_size == 0);
2447 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2448 last_free_start = free_start;
Steve Blocka7e24c12009-10-30 11:49:00 +00002449 }
2450 }
Steve Block6ded16b2010-05-10 14:33:55 +01002451
2452 prev = p;
2453 }
2454
2455 // We reached end of space. See if we need to adjust allocation top.
2456 Address new_allocation_top = NULL;
2457
2458 if (first_empty_page->is_valid()) {
2459    // The last used pages in the space are empty. We can move the allocation
2460    // top backwards to the beginning of the first empty page.
2461 ASSERT(prev == space->AllocationTopPage());
2462
2463 new_allocation_top = first_empty_page->ObjectAreaStart();
2464 }
2465
2466 if (last_free_size > 0) {
2467 // There was a free ending area on the previous page.
2468 // Deallocate it without putting it into freelist and move allocation
2469 // top to the beginning of this free area.
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002470 space->DeallocateBlock(last_free_start, last_free_size, false);
Steve Block6ded16b2010-05-10 14:33:55 +01002471 new_allocation_top = last_free_start;
2472 }
2473
2474 if (new_allocation_top != NULL) {
2475#ifdef DEBUG
2476 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2477 if (!first_empty_page->is_valid()) {
2478 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2479 } else if (last_free_size > 0) {
2480 ASSERT(new_allocation_top_page == prec_first_empty_page);
2481 } else {
2482 ASSERT(new_allocation_top_page == first_empty_page);
2483 }
2484#endif
2485
2486 space->SetTop(new_allocation_top);
Steve Blocka7e24c12009-10-30 11:49:00 +00002487 }
2488}
2489
2490
Steve Blocka7e24c12009-10-30 11:49:00 +00002491void MarkCompactCollector::EncodeForwardingAddresses() {
2492 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2493 // Objects in the active semispace of the young generation may be
2494 // relocated to the inactive semispace (if not promoted). Set the
2495 // relocation info to the beginning of the inactive semispace.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002496 heap()->new_space()->MCResetRelocationInfo();
Steve Blocka7e24c12009-10-30 11:49:00 +00002497
2498 // Compute the forwarding pointers in each space.
2499 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002500 ReportDeleteIfNeeded>(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002501 heap()->old_pointer_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002502
2503 EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
2504 IgnoreNonLiveObject>(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002505 heap()->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002506
2507 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
Leon Clarked91b9f72010-01-27 17:25:45 +00002508 ReportDeleteIfNeeded>(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002509 heap()->code_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002510
2511 EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
2512 IgnoreNonLiveObject>(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002513 heap()->cell_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002514
2515
2516 // Compute new space next to last after the old and code spaces have been
2517 // compacted. Objects in new space can be promoted to old or code space.
2518 EncodeForwardingAddressesInNewSpace();
2519
2520 // Compute map space last because computing forwarding addresses
2521 // overwrites non-live objects. Objects in the other spaces rely on
2522 // non-live map pointers to get the sizes of non-live objects.
2523 EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
2524 IgnoreNonLiveObject>(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002525 heap()->map_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00002526
2527 // Write relocation info to the top page, so we can use it later. This is
2528 // done after promoting objects from the new space so we get the correct
2529 // allocation top.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002530 heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
2531 heap()->old_data_space()->MCWriteRelocationInfoToPage();
2532 heap()->code_space()->MCWriteRelocationInfoToPage();
2533 heap()->map_space()->MCWriteRelocationInfoToPage();
2534 heap()->cell_space()->MCWriteRelocationInfoToPage();
Steve Blocka7e24c12009-10-30 11:49:00 +00002535}
2536
2537
Leon Clarkee46be812010-01-19 14:06:41 +00002538class MapIterator : public HeapObjectIterator {
2539 public:
Ben Murdoch8b112d22011-06-08 16:22:53 +01002540 explicit MapIterator(Heap* heap)
2541 : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
Leon Clarkee46be812010-01-19 14:06:41 +00002542
Ben Murdoch8b112d22011-06-08 16:22:53 +01002543 MapIterator(Heap* heap, Address start)
2544 : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
Leon Clarkee46be812010-01-19 14:06:41 +00002545
2546 private:
2547 static int SizeCallback(HeapObject* unused) {
2548 USE(unused);
2549 return Map::kSize;
2550 }
2551};
2552
2553
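// Compacts the map space in place: live maps above the post-compaction top
// are copied into vacant (free-list) slots below it, and each evacuated map
// is left holding an overflow-tagged forwarding map word that the Update*
// methods below use to rewrite all remaining map pointers.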
2554class MapCompact {
2555 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002556 explicit MapCompact(Heap* heap, int live_maps)
2557 : heap_(heap),
2558 live_maps_(live_maps),
2559 to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
Ben Murdoch8b112d22011-06-08 16:22:53 +01002560 vacant_map_it_(heap),
2561 map_to_evacuate_it_(heap, to_evacuate_start_),
Leon Clarkee46be812010-01-19 14:06:41 +00002562 first_map_to_evacuate_(
2563 reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
2564 }
2565
2566 void CompactMaps() {
2567 // As we know the number of maps to evacuate beforehand,
2568    // we stop when there are no more vacant maps.
2569 for (Map* next_vacant_map = NextVacantMap();
2570 next_vacant_map;
2571 next_vacant_map = NextVacantMap()) {
2572 EvacuateMap(next_vacant_map, NextMapToEvacuate());
2573 }
2574
2575#ifdef DEBUG
2576 CheckNoMapsToEvacuate();
2577#endif
2578 }
2579
2580 void UpdateMapPointersInRoots() {
Steve Block44f0eee2011-05-26 01:26:41 +01002581 MapUpdatingVisitor map_updating_visitor;
Ben Murdoch8b112d22011-06-08 16:22:53 +01002582 heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
2583 heap()->isolate()->global_handles()->IterateWeakRoots(
2584 &map_updating_visitor);
Steve Block44f0eee2011-05-26 01:26:41 +01002585 LiveObjectList::IterateElements(&map_updating_visitor);
Leon Clarkee46be812010-01-19 14:06:41 +00002586 }
2587
Leon Clarkee46be812010-01-19 14:06:41 +00002588 void UpdateMapPointersInPagedSpace(PagedSpace* space) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002589 ASSERT(space != heap()->map_space());
Leon Clarkee46be812010-01-19 14:06:41 +00002590
2591 PageIterator it(space, PageIterator::PAGES_IN_USE);
2592 while (it.has_next()) {
2593 Page* p = it.next();
Ben Murdoch8b112d22011-06-08 16:22:53 +01002594 UpdateMapPointersInRange(heap(),
2595 p->ObjectAreaStart(),
2596 p->AllocationTop());
Leon Clarkee46be812010-01-19 14:06:41 +00002597 }
2598 }
2599
2600 void UpdateMapPointersInNewSpace() {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002601 NewSpace* space = heap()->new_space();
2602 UpdateMapPointersInRange(heap(), space->bottom(), space->top());
Leon Clarkee46be812010-01-19 14:06:41 +00002603 }
2604
2605 void UpdateMapPointersInLargeObjectSpace() {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002606 LargeObjectIterator it(heap()->lo_space());
Leon Clarked91b9f72010-01-27 17:25:45 +00002607 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
Ben Murdoch8b112d22011-06-08 16:22:53 +01002608 UpdateMapPointersInObject(heap(), obj);
Leon Clarkee46be812010-01-19 14:06:41 +00002609 }
2610
2611 void Finish() {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002612 heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
Leon Clarkee46be812010-01-19 14:06:41 +00002613 }
2614
Ben Murdoch8b112d22011-06-08 16:22:53 +01002615 inline Heap* heap() const { return heap_; }
2616
Leon Clarkee46be812010-01-19 14:06:41 +00002617 private:
Steve Block44f0eee2011-05-26 01:26:41 +01002618 Heap* heap_;
Leon Clarkee46be812010-01-19 14:06:41 +00002619 int live_maps_;
2620 Address to_evacuate_start_;
2621 MapIterator vacant_map_it_;
2622 MapIterator map_to_evacuate_it_;
2623 Map* first_map_to_evacuate_;
2624
2625 // Helper class for updating map pointers in HeapObjects.
2626 class MapUpdatingVisitor: public ObjectVisitor {
2627 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002628 MapUpdatingVisitor() {}
2629
Leon Clarkee46be812010-01-19 14:06:41 +00002630 void VisitPointer(Object** p) {
2631 UpdateMapPointer(p);
2632 }
2633
2634 void VisitPointers(Object** start, Object** end) {
2635 for (Object** p = start; p < end; p++) UpdateMapPointer(p);
2636 }
2637
2638 private:
2639 void UpdateMapPointer(Object** p) {
2640 if (!(*p)->IsHeapObject()) return;
2641 HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
2642
2643      // Moved maps are tagged with an overflowed map word. They are the only
2644      // objects whose map word is overflowed, as marking is already complete.
2645 MapWord map_word = old_map->map_word();
2646 if (!map_word.IsOverflowed()) return;
2647
2648 *p = GetForwardedMap(map_word);
2649 }
2650 };
2651
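  // Advances the iterator to the next entry of the requested kind: with
  // live == true the next live Map, with live == false the next vacant
  // (free-list) slot. Returns NULL when |last| is reached.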
Leon Clarkee46be812010-01-19 14:06:41 +00002652 static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
2653 while (true) {
Leon Clarkee46be812010-01-19 14:06:41 +00002654 HeapObject* next = it->next();
Leon Clarked91b9f72010-01-27 17:25:45 +00002655 ASSERT(next != NULL);
Leon Clarkee46be812010-01-19 14:06:41 +00002656 if (next == last)
2657 return NULL;
2658 ASSERT(!next->IsOverflowed());
2659 ASSERT(!next->IsMarked());
2660 ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
2661 if (next->IsMap() == live)
2662 return reinterpret_cast<Map*>(next);
2663 }
2664 }
2665
2666 Map* NextVacantMap() {
2667 Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
2668 ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
2669 return map;
2670 }
2671
2672 Map* NextMapToEvacuate() {
2673 Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
2674 ASSERT(map != NULL);
2675 ASSERT(map->IsMap());
2676 return map;
2677 }
2678
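  // Copies a live map into a vacant free-list slot and installs an
  // overflow-tagged forwarding map word in the evacuated map so stale map
  // pointers can still be resolved to the new location.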
2679 static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
2680 ASSERT(FreeListNode::IsFreeListNode(vacant_map));
2681 ASSERT(map_to_evacuate->IsMap());
2682
Steve Block6ded16b2010-05-10 14:33:55 +01002683 ASSERT(Map::kSize % 4 == 0);
2684
Steve Block44f0eee2011-05-26 01:26:41 +01002685 map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
2686 vacant_map->address(), map_to_evacuate->address(), Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01002687
Leon Clarkee46be812010-01-19 14:06:41 +00002688 ASSERT(vacant_map->IsMap()); // Due to memcpy above.
2689
2690 MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
2691 forwarding_map_word.SetOverflow();
2692 map_to_evacuate->set_map_word(forwarding_map_word);
2693
2694 ASSERT(map_to_evacuate->map_word().IsOverflowed());
2695 ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
2696 }
2697
2698 static Map* GetForwardedMap(MapWord map_word) {
2699 ASSERT(map_word.IsOverflowed());
2700 map_word.ClearOverflow();
2701 Map* new_map = map_word.ToMap();
2702 ASSERT_MAP_ALIGNED(new_map->address());
2703 return new_map;
2704 }
2705
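  // Rewrites the map pointer of |obj| if its map has been evacuated (detected
  // via the overflow-tagged forwarding word), then visits the object body so
  // interior map pointers are updated as well. Returns the object's size.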
Steve Block44f0eee2011-05-26 01:26:41 +01002706 static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
Leon Clarkee46be812010-01-19 14:06:41 +00002707 ASSERT(!obj->IsMarked());
2708 Map* map = obj->map();
Steve Block44f0eee2011-05-26 01:26:41 +01002709 ASSERT(heap->map_space()->Contains(map));
Leon Clarkee46be812010-01-19 14:06:41 +00002710 MapWord map_word = map->map_word();
2711 ASSERT(!map_word.IsMarked());
2712 if (map_word.IsOverflowed()) {
2713 Map* new_map = GetForwardedMap(map_word);
Steve Block44f0eee2011-05-26 01:26:41 +01002714 ASSERT(heap->map_space()->Contains(new_map));
Leon Clarkee46be812010-01-19 14:06:41 +00002715 obj->set_map(new_map);
2716
2717#ifdef DEBUG
2718 if (FLAG_gc_verbose) {
Ben Murdochf87a2032010-10-22 12:50:53 +01002719 PrintF("update %p : %p -> %p\n",
2720 obj->address(),
2721 reinterpret_cast<void*>(map),
2722 reinterpret_cast<void*>(new_map));
Leon Clarkee46be812010-01-19 14:06:41 +00002723 }
2724#endif
2725 }
2726
2727 int size = obj->SizeFromMap(map);
Steve Block44f0eee2011-05-26 01:26:41 +01002728 MapUpdatingVisitor map_updating_visitor;
2729 obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
Leon Clarkee46be812010-01-19 14:06:41 +00002730 return size;
2731 }
2732
Steve Block44f0eee2011-05-26 01:26:41 +01002733 static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
Leon Clarkee46be812010-01-19 14:06:41 +00002734 HeapObject* object;
2735 int size;
2736 for (Address current = start; current < end; current += size) {
2737 object = HeapObject::FromAddress(current);
Steve Block44f0eee2011-05-26 01:26:41 +01002738 size = UpdateMapPointersInObject(heap, object);
Leon Clarkee46be812010-01-19 14:06:41 +00002739 ASSERT(size > 0);
2740 }
2741 }
2742
2743#ifdef DEBUG
2744 void CheckNoMapsToEvacuate() {
2745 if (!FLAG_enable_slow_asserts)
2746 return;
2747
Leon Clarked91b9f72010-01-27 17:25:45 +00002748 for (HeapObject* obj = map_to_evacuate_it_.next();
2749 obj != NULL; obj = map_to_evacuate_it_.next())
2750 ASSERT(FreeListNode::IsFreeListNode(obj));
Leon Clarkee46be812010-01-19 14:06:41 +00002751 }
2752#endif
2753};
2754
Leon Clarkee46be812010-01-19 14:06:41 +00002755
Steve Blocka7e24c12009-10-30 11:49:00 +00002756void MarkCompactCollector::SweepSpaces() {
Leon Clarkef7060e22010-06-03 12:02:55 +01002757 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2758
Steve Blocka7e24c12009-10-30 11:49:00 +00002759 ASSERT(state_ == SWEEP_SPACES);
2760 ASSERT(!IsCompacting());
2761 // Noncompacting collections simply sweep the spaces to clear the mark
2762 // bits and free the nonlive blocks (for old and map spaces). We sweep
2763 // the map space last because freeing non-live maps overwrites them and
2764 // the other spaces rely on possibly non-live maps to get the sizes for
2765 // non-live objects.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002766 SweepSpace(heap(), heap()->old_pointer_space());
2767 SweepSpace(heap(), heap()->old_data_space());
2768 SweepSpace(heap(), heap()->code_space());
2769 SweepSpace(heap(), heap()->cell_space());
Iain Merrick75681382010-08-19 15:07:18 +01002770 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002771 SweepNewSpace(heap(), heap()->new_space());
Iain Merrick75681382010-08-19 15:07:18 +01002772 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01002773 SweepSpace(heap(), heap()->map_space());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002774
Ben Murdoch8b112d22011-06-08 16:22:53 +01002775 heap()->IterateDirtyRegions(heap()->map_space(),
2776 &heap()->IteratePointersInDirtyMapsRegion,
Steve Block44f0eee2011-05-26 01:26:41 +01002777 &UpdatePointerToNewGen,
Ben Murdoch8b112d22011-06-08 16:22:53 +01002778 heap()->WATERMARK_SHOULD_BE_VALID);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002779
Ben Murdoch8b112d22011-06-08 16:22:53 +01002780 intptr_t live_maps_size = heap()->map_space()->Size();
Ben Murdochf87a2032010-10-22 12:50:53 +01002781 int live_maps = static_cast<int>(live_maps_size / Map::kSize);
Steve Block6ded16b2010-05-10 14:33:55 +01002782 ASSERT(live_map_objects_size_ == live_maps_size);
Leon Clarkee46be812010-01-19 14:06:41 +00002783
Ben Murdoch8b112d22011-06-08 16:22:53 +01002784 if (heap()->map_space()->NeedsCompaction(live_maps)) {
2785 MapCompact map_compact(heap(), live_maps);
Leon Clarkee46be812010-01-19 14:06:41 +00002786
2787 map_compact.CompactMaps();
2788 map_compact.UpdateMapPointersInRoots();
2789
Leon Clarkee46be812010-01-19 14:06:41 +00002790 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00002791 for (PagedSpace* space = spaces.next();
2792 space != NULL; space = spaces.next()) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01002793 if (space == heap()->map_space()) continue;
Leon Clarkee46be812010-01-19 14:06:41 +00002794 map_compact.UpdateMapPointersInPagedSpace(space);
2795 }
2796 map_compact.UpdateMapPointersInNewSpace();
2797 map_compact.UpdateMapPointersInLargeObjectSpace();
2798
2799 map_compact.Finish();
2800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002801}
2802
2803
2804// Iterate the live objects in a range of addresses (e.g., a page or a
2805// semispace). Non-live regions of the range have been encoded as free
2806// blocks in an earlier phase, so they can be skipped without consulting a
2807// map. The callback function is used to get the size of each live
2808// object.
2809int MarkCompactCollector::IterateLiveObjectsInRange(
2810 Address start,
2811 Address end,
Steve Block44f0eee2011-05-26 01:26:41 +01002812 LiveObjectCallback size_func) {
Steve Block6ded16b2010-05-10 14:33:55 +01002813 int live_objects_size = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00002814 Address current = start;
2815 while (current < end) {
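    // Non-live regions were encoded as free blocks in an earlier phase: a
    // single free word carries kSingleFreeEncoding, and a longer free block
    // carries kMultiFreeEncoding with its length stored immediately after it.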
2816 uint32_t encoded_map = Memory::uint32_at(current);
2817 if (encoded_map == kSingleFreeEncoding) {
2818 current += kPointerSize;
2819 } else if (encoded_map == kMultiFreeEncoding) {
2820 current += Memory::int_at(current + kIntSize);
2821 } else {
Steve Block44f0eee2011-05-26 01:26:41 +01002822 int size = (this->*size_func)(HeapObject::FromAddress(current));
Steve Block6ded16b2010-05-10 14:33:55 +01002823 current += size;
2824 live_objects_size += size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002825 }
2826 }
Steve Block6ded16b2010-05-10 14:33:55 +01002827 return live_objects_size;
Steve Blocka7e24c12009-10-30 11:49:00 +00002828}
2829
2830
Steve Block44f0eee2011-05-26 01:26:41 +01002831int MarkCompactCollector::IterateLiveObjects(
2832 NewSpace* space, LiveObjectCallback size_f) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002833 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2834 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2835}
2836
2837
Steve Block44f0eee2011-05-26 01:26:41 +01002838int MarkCompactCollector::IterateLiveObjects(
2839 PagedSpace* space, LiveObjectCallback size_f) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002840 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2841 int total = 0;
2842 PageIterator it(space, PageIterator::PAGES_IN_USE);
2843 while (it.has_next()) {
2844 Page* p = it.next();
2845 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2846 p->AllocationTop(),
2847 size_f);
2848 }
2849 return total;
2850}
2851
2852
2853// -------------------------------------------------------------------------
2854// Phase 3: Update pointers
2855
2856// Helper class for updating pointers in HeapObjects.
2857class UpdatingVisitor: public ObjectVisitor {
2858 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002859 explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}
2860
Steve Blocka7e24c12009-10-30 11:49:00 +00002861 void VisitPointer(Object** p) {
2862 UpdatePointer(p);
2863 }
2864
2865 void VisitPointers(Object** start, Object** end) {
2866    // Update all HeapObject pointers in [start, end).
2867 for (Object** p = start; p < end; p++) UpdatePointer(p);
2868 }
2869
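  // Code targets and debug targets are stored as raw instruction addresses in
  // the relocation info, so after the referenced Code object has been
  // forwarded the address is rewritten to its new instruction_start().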
2870 void VisitCodeTarget(RelocInfo* rinfo) {
2871 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2872 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
2873 VisitPointer(&target);
2874 rinfo->set_target_address(
2875 reinterpret_cast<Code*>(target)->instruction_start());
2876 }
2877
Steve Block3ce2e202009-11-05 08:53:23 +00002878 void VisitDebugTarget(RelocInfo* rinfo) {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002879 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2880 rinfo->IsPatchedReturnSequence()) ||
2881 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2882 rinfo->IsPatchedDebugBreakSlotSequence()));
Steve Block3ce2e202009-11-05 08:53:23 +00002883 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2884 VisitPointer(&target);
2885 rinfo->set_call_address(
2886 reinterpret_cast<Code*>(target)->instruction_start());
2887 }
2888
Ben Murdoch8b112d22011-06-08 16:22:53 +01002889 inline Heap* heap() const { return heap_; }
2890
Steve Blocka7e24c12009-10-30 11:49:00 +00002891 private:
2892 void UpdatePointer(Object** p) {
2893 if (!(*p)->IsHeapObject()) return;
2894
2895 HeapObject* obj = HeapObject::cast(*p);
2896 Address old_addr = obj->address();
2897 Address new_addr;
Ben Murdoch8b112d22011-06-08 16:22:53 +01002898 ASSERT(!heap()->InFromSpace(obj));
Steve Blocka7e24c12009-10-30 11:49:00 +00002899
Ben Murdoch8b112d22011-06-08 16:22:53 +01002900 if (heap()->new_space()->Contains(obj)) {
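      // The forwarding address of a surviving new-space object was recorded
      // in the from semispace at the same offset the object occupies in to
      // space.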
Steve Blocka7e24c12009-10-30 11:49:00 +00002901 Address forwarding_pointer_addr =
Ben Murdoch8b112d22011-06-08 16:22:53 +01002902 heap()->new_space()->FromSpaceLow() +
2903 heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00002904 new_addr = Memory::Address_at(forwarding_pointer_addr);
2905
2906#ifdef DEBUG
Ben Murdoch8b112d22011-06-08 16:22:53 +01002907 ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
2908 heap()->old_data_space()->Contains(new_addr) ||
2909 heap()->new_space()->FromSpaceContains(new_addr) ||
2910 heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00002911
Ben Murdoch8b112d22011-06-08 16:22:53 +01002912 if (heap()->new_space()->FromSpaceContains(new_addr)) {
2913 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
2914 heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00002915 }
2916#endif
2917
Ben Murdoch8b112d22011-06-08 16:22:53 +01002918 } else if (heap()->lo_space()->Contains(obj)) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002919 // Don't move objects in the large object space.
2920 return;
2921
2922 } else {
2923#ifdef DEBUG
2924 PagedSpaces spaces;
2925 PagedSpace* original_space = spaces.next();
2926 while (original_space != NULL) {
2927 if (original_space->Contains(obj)) break;
2928 original_space = spaces.next();
2929 }
2930 ASSERT(original_space != NULL);
2931#endif
2932 new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
2933 ASSERT(original_space->Contains(new_addr));
2934 ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
2935 original_space->MCSpaceOffsetForAddress(old_addr));
2936 }
2937
2938 *p = HeapObject::FromAddress(new_addr);
2939
2940#ifdef DEBUG
2941 if (FLAG_gc_verbose) {
2942 PrintF("update %p : %p -> %p\n",
2943 reinterpret_cast<Address>(p), old_addr, new_addr);
2944 }
2945#endif
2946 }
Steve Block44f0eee2011-05-26 01:26:41 +01002947
2948 Heap* heap_;
Steve Blocka7e24c12009-10-30 11:49:00 +00002949};
2950
2951
2952void MarkCompactCollector::UpdatePointers() {
2953#ifdef DEBUG
2954 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
2955 state_ = UPDATE_POINTERS;
2956#endif
Ben Murdoch8b112d22011-06-08 16:22:53 +01002957 UpdatingVisitor updating_visitor(heap());
2958 heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
Steve Block44f0eee2011-05-26 01:26:41 +01002959 &updating_visitor);
Ben Murdoch8b112d22011-06-08 16:22:53 +01002960 heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
2961 heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);
Steve Blocka7e24c12009-10-30 11:49:00 +00002962
Ben Murdochf87a2032010-10-22 12:50:53 +01002963 // Update the pointer to the head of the weak list of global contexts.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002964 updating_visitor.VisitPointer(&heap()->global_contexts_list_);
Ben Murdochf87a2032010-10-22 12:50:53 +01002965
Steve Block1e0659c2011-05-24 12:43:12 +01002966 LiveObjectList::IterateElements(&updating_visitor);
2967
Steve Block44f0eee2011-05-26 01:26:41 +01002968 int live_maps_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002969 heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
Steve Block44f0eee2011-05-26 01:26:41 +01002970 int live_pointer_olds_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002971 heap()->old_pointer_space(),
Steve Block44f0eee2011-05-26 01:26:41 +01002972 &MarkCompactCollector::UpdatePointersInOldObject);
2973 int live_data_olds_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002974 heap()->old_data_space(),
Steve Block44f0eee2011-05-26 01:26:41 +01002975 &MarkCompactCollector::UpdatePointersInOldObject);
2976 int live_codes_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002977 heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
Steve Block44f0eee2011-05-26 01:26:41 +01002978 int live_cells_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002979 heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
Steve Block44f0eee2011-05-26 01:26:41 +01002980 int live_news_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01002981 heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00002982
2983  // Large objects do not move, so the map word can be updated directly.
Ben Murdoch8b112d22011-06-08 16:22:53 +01002984 LargeObjectIterator it(heap()->lo_space());
Ben Murdochb0fe1622011-05-05 13:52:32 +01002985 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002986 UpdatePointersInNewObject(obj);
Ben Murdochb0fe1622011-05-05 13:52:32 +01002987 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002988
Steve Block6ded16b2010-05-10 14:33:55 +01002989 USE(live_maps_size);
2990 USE(live_pointer_olds_size);
2991 USE(live_data_olds_size);
2992 USE(live_codes_size);
2993 USE(live_cells_size);
2994 USE(live_news_size);
2995 ASSERT(live_maps_size == live_map_objects_size_);
2996 ASSERT(live_data_olds_size == live_old_data_objects_size_);
2997 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
2998 ASSERT(live_codes_size == live_code_objects_size_);
2999 ASSERT(live_cells_size == live_cell_objects_size_);
3000 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003001}
3002
3003
3004int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
3005 // Keep old map pointers
3006 Map* old_map = obj->map();
3007 ASSERT(old_map->IsHeapObject());
3008
3009 Address forwarded = GetForwardingAddressInOldSpace(old_map);
3010
Ben Murdoch8b112d22011-06-08 16:22:53 +01003011 ASSERT(heap()->map_space()->Contains(old_map));
3012 ASSERT(heap()->map_space()->Contains(forwarded));
Steve Blocka7e24c12009-10-30 11:49:00 +00003013#ifdef DEBUG
3014 if (FLAG_gc_verbose) {
3015 PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
3016 forwarded);
3017 }
3018#endif
3019 // Update the map pointer.
3020 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));
3021
3022 // We have to compute the object size relying on the old map because
3023 // map objects are not relocated yet.
3024 int obj_size = obj->SizeFromMap(old_map);
3025
3026 // Update pointers in the object body.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003027 UpdatingVisitor updating_visitor(heap());
Steve Blocka7e24c12009-10-30 11:49:00 +00003028 obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
3029 return obj_size;
3030}
3031
3032
3033int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
3034 // Decode the map pointer.
3035 MapWord encoding = obj->map_word();
Ben Murdoch8b112d22011-06-08 16:22:53 +01003036 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3037 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00003038
3039  // At this point, the first word at map_addr is also encoded, so we cannot
3040 // cast it to Map* using Map::cast.
3041 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
3042 int obj_size = obj->SizeFromMap(map);
3043 InstanceType type = map->instance_type();
3044
3045 // Update map pointer.
3046 Address new_map_addr = GetForwardingAddressInOldSpace(map);
3047 int offset = encoding.DecodeOffset();
3048 obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));
3049
3050#ifdef DEBUG
3051 if (FLAG_gc_verbose) {
3052 PrintF("update %p : %p -> %p\n", obj->address(),
3053 map_addr, new_map_addr);
3054 }
3055#endif
3056
3057 // Update pointers in the object body.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003058 UpdatingVisitor updating_visitor(heap());
Steve Blocka7e24c12009-10-30 11:49:00 +00003059 obj->IterateBody(type, obj_size, &updating_visitor);
3060 return obj_size;
3061}
3062
3063
3064Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
3065  // The object should be in either old space or map space.
3066 MapWord encoding = obj->map_word();
3067
3068 // Offset to the first live object's forwarding address.
3069 int offset = encoding.DecodeOffset();
3070 Address obj_addr = obj->address();
3071
3072 // Find the first live object's forwarding address.
3073 Page* p = Page::FromAddress(obj_addr);
3074 Address first_forwarded = p->mc_first_forwarded;
3075
3076 // Page start address of forwarded address.
3077 Page* forwarded_page = Page::FromAddress(first_forwarded);
3078 int forwarded_offset = forwarded_page->Offset(first_forwarded);
3079
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003080 // Find end of allocation in the page of first_forwarded.
3081 int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
Steve Blocka7e24c12009-10-30 11:49:00 +00003082
3083  // Check if the current object's forwarding pointer is in the same page
3084  // as the first live object's forwarding pointer.
3085 if (forwarded_offset + offset < mc_top_offset) {
3086 // In the same page.
3087 return first_forwarded + offset;
3088 }
3089
3090 // Must be in the next page, NOTE: this may cross chunks.
3091 Page* next_page = forwarded_page->next_page();
3092 ASSERT(next_page->is_valid());
3093
3094 offset -= (mc_top_offset - forwarded_offset);
3095 offset += Page::kObjectStartOffset;
3096
3097 ASSERT_PAGE_OFFSET(offset);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003098 ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
Steve Blocka7e24c12009-10-30 11:49:00 +00003099
3100 return next_page->OffsetToAddress(offset);
3101}
3102
3103
3104// -------------------------------------------------------------------------
3105// Phase 4: Relocate objects
3106
3107void MarkCompactCollector::RelocateObjects() {
3108#ifdef DEBUG
3109 ASSERT(state_ == UPDATE_POINTERS);
3110 state_ = RELOCATE_OBJECTS;
3111#endif
3112  // Relocate objects; always relocate map objects first. Relocating
3113  // objects in other spaces relies on map objects to get object sizes.
Steve Block44f0eee2011-05-26 01:26:41 +01003114 int live_maps_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003115 heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
Steve Block44f0eee2011-05-26 01:26:41 +01003116 int live_pointer_olds_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003117 heap()->old_pointer_space(),
Steve Block44f0eee2011-05-26 01:26:41 +01003118 &MarkCompactCollector::RelocateOldPointerObject);
3119 int live_data_olds_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003120 heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
Steve Block44f0eee2011-05-26 01:26:41 +01003121 int live_codes_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003122 heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
Steve Block44f0eee2011-05-26 01:26:41 +01003123 int live_cells_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003124 heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
Steve Block44f0eee2011-05-26 01:26:41 +01003125 int live_news_size = IterateLiveObjects(
Ben Murdoch8b112d22011-06-08 16:22:53 +01003126 heap()->new_space(), &MarkCompactCollector::RelocateNewObject);
Steve Blocka7e24c12009-10-30 11:49:00 +00003127
Steve Block6ded16b2010-05-10 14:33:55 +01003128 USE(live_maps_size);
3129 USE(live_pointer_olds_size);
3130 USE(live_data_olds_size);
3131 USE(live_codes_size);
3132 USE(live_cells_size);
3133 USE(live_news_size);
3134 ASSERT(live_maps_size == live_map_objects_size_);
3135 ASSERT(live_data_olds_size == live_old_data_objects_size_);
3136 ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
3137 ASSERT(live_codes_size == live_code_objects_size_);
3138 ASSERT(live_cells_size == live_cell_objects_size_);
3139 ASSERT(live_news_size == live_young_objects_size_);
Steve Blocka7e24c12009-10-30 11:49:00 +00003140
3141 // Flip from and to spaces
Ben Murdoch8b112d22011-06-08 16:22:53 +01003142 heap()->new_space()->Flip();
Steve Blocka7e24c12009-10-30 11:49:00 +00003143
Ben Murdoch8b112d22011-06-08 16:22:53 +01003144 heap()->new_space()->MCCommitRelocationInfo();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003145
Steve Blocka7e24c12009-10-30 11:49:00 +00003146  // Set the age mark to the bottom of to space.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003147 Address mark = heap()->new_space()->bottom();
3148 heap()->new_space()->set_age_mark(mark);
Steve Blocka7e24c12009-10-30 11:49:00 +00003149
Steve Blocka7e24c12009-10-30 11:49:00 +00003150 PagedSpaces spaces;
Leon Clarked91b9f72010-01-27 17:25:45 +00003151 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
3152 space->MCCommitRelocationInfo();
Steve Block6ded16b2010-05-10 14:33:55 +01003153
Ben Murdoch8b112d22011-06-08 16:22:53 +01003154 heap()->CheckNewSpaceExpansionCriteria();
3155 heap()->IncrementYoungSurvivorsCounter(live_news_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003156}
3157
3158
3159int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
3160 // Recover map pointer.
3161 MapWord encoding = obj->map_word();
Ben Murdoch8b112d22011-06-08 16:22:53 +01003162 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3163 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00003164
3165 // Get forwarding address before resetting map pointer
3166 Address new_addr = GetForwardingAddressInOldSpace(obj);
3167
3168 // Reset map pointer. The meta map object may not be copied yet so
3169 // Map::cast does not yet work.
3170 obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));
3171
3172 Address old_addr = obj->address();
3173
3174 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01003175 // Move contents.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003176 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
Steve Block44f0eee2011-05-26 01:26:41 +01003177 old_addr,
3178 Map::kSize);
Steve Blocka7e24c12009-10-30 11:49:00 +00003179 }
3180
3181#ifdef DEBUG
3182 if (FLAG_gc_verbose) {
3183 PrintF("relocate %p -> %p\n", old_addr, new_addr);
3184 }
3185#endif
3186
3187 return Map::kSize;
3188}
3189
3190
3191static inline int RestoreMap(HeapObject* obj,
3192 PagedSpace* space,
3193 Address new_addr,
3194 Address map_addr) {
3195 // This must be a non-map object, and the function relies on the
3196 // assumption that the Map space is compacted before the other paged
3197 // spaces (see RelocateObjects).
3198
3199 // Reset map pointer.
3200 obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));
3201
3202 int obj_size = obj->Size();
3203 ASSERT_OBJECT_SIZE(obj_size);
3204
3205 ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
3206 space->MCSpaceOffsetForAddress(obj->address()));
3207
3208#ifdef DEBUG
3209 if (FLAG_gc_verbose) {
3210 PrintF("relocate %p -> %p\n", obj->address(), new_addr);
3211 }
3212#endif
3213
3214 return obj_size;
3215}
3216
3217
3218int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
3219 PagedSpace* space) {
3220 // Recover map pointer.
3221 MapWord encoding = obj->map_word();
Ben Murdoch8b112d22011-06-08 16:22:53 +01003222 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3223 ASSERT(heap()->map_space()->Contains(map_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003224
3225 // Get forwarding address before resetting map pointer.
3226 Address new_addr = GetForwardingAddressInOldSpace(obj);
3227
3228 // Reset the map pointer.
3229 int obj_size = RestoreMap(obj, space, new_addr, map_addr);
3230
3231 Address old_addr = obj->address();
3232
3233 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01003234 // Move contents.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003235 if (space == heap()->old_data_space()) {
3236 heap()->MoveBlock(new_addr, old_addr, obj_size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003237 } else {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003238 heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
Steve Block44f0eee2011-05-26 01:26:41 +01003239 old_addr,
3240 obj_size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003241 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003242 }
3243
3244 ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
3245
Leon Clarked91b9f72010-01-27 17:25:45 +00003246 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003247 if (copied_to->IsSharedFunctionInfo()) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003248 PROFILE(heap()->isolate(),
Steve Block44f0eee2011-05-26 01:26:41 +01003249 SharedFunctionInfoMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00003250 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01003251 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00003252
Steve Blocka7e24c12009-10-30 11:49:00 +00003253 return obj_size;
3254}
3255
3256
3257int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003258 return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00003259}
3260
3261
3262int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003263 return RelocateOldNonCodeObject(obj, heap()->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00003264}
3265
3266
3267int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003268 return RelocateOldNonCodeObject(obj, heap()->cell_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00003269}
3270
3271
3272int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
3273 // Recover map pointer.
3274 MapWord encoding = obj->map_word();
Ben Murdoch8b112d22011-06-08 16:22:53 +01003275 Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
3276 ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));
Steve Blocka7e24c12009-10-30 11:49:00 +00003277
3278 // Get forwarding address before resetting map pointer
3279 Address new_addr = GetForwardingAddressInOldSpace(obj);
3280
3281 // Reset the map pointer.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003282 int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00003283
3284 Address old_addr = obj->address();
3285
3286 if (new_addr != old_addr) {
Steve Block6ded16b2010-05-10 14:33:55 +01003287 // Move contents.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003288 heap()->MoveBlock(new_addr, old_addr, obj_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00003289 }
3290
3291 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
3292 if (copied_to->IsCode()) {
3293 // May also update inline cache target.
3294 Code::cast(copied_to)->Relocate(new_addr - old_addr);
3295 // Notify the logger that compiled code has moved.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003296 PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003297 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01003298 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003299
3300 return obj_size;
3301}
3302
3303
3304int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
3305 int obj_size = obj->Size();
3306
3307 // Get forwarding address
3308 Address old_addr = obj->address();
Ben Murdoch8b112d22011-06-08 16:22:53 +01003309 int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
Steve Blocka7e24c12009-10-30 11:49:00 +00003310
3311 Address new_addr =
Ben Murdoch8b112d22011-06-08 16:22:53 +01003312 Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);
Steve Blocka7e24c12009-10-30 11:49:00 +00003313
3314#ifdef DEBUG
Ben Murdoch8b112d22011-06-08 16:22:53 +01003315 if (heap()->new_space()->FromSpaceContains(new_addr)) {
3316 ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
3317 heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
Steve Blocka7e24c12009-10-30 11:49:00 +00003318 } else {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003319 ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
3320 heap()->TargetSpace(obj) == heap()->old_data_space());
Steve Blocka7e24c12009-10-30 11:49:00 +00003321 }
3322#endif
3323
3324 // New and old addresses cannot overlap.
Ben Murdoch8b112d22011-06-08 16:22:53 +01003325 if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
3326 heap()->CopyBlock(new_addr, old_addr, obj_size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003327 } else {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003328 heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
Steve Block44f0eee2011-05-26 01:26:41 +01003329 old_addr,
3330 obj_size);
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01003331 }
Steve Blocka7e24c12009-10-30 11:49:00 +00003332
3333#ifdef DEBUG
3334 if (FLAG_gc_verbose) {
3335 PrintF("relocate %p -> %p\n", old_addr, new_addr);
3336 }
3337#endif
3338
Leon Clarked91b9f72010-01-27 17:25:45 +00003339 HeapObject* copied_to = HeapObject::FromAddress(new_addr);
Ben Murdoche0cee9b2011-05-25 10:26:03 +01003340 if (copied_to->IsSharedFunctionInfo()) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003341 PROFILE(heap()->isolate(),
Steve Block44f0eee2011-05-26 01:26:41 +01003342 SharedFunctionInfoMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00003343 }
Ben Murdoch8b112d22011-06-08 16:22:53 +01003344 HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));
Leon Clarked91b9f72010-01-27 17:25:45 +00003345
Steve Blocka7e24c12009-10-30 11:49:00 +00003346 return obj_size;
3347}
3348
3349
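// Code flushing is represented simply by the presence of a CodeFlusher:
// enabling allocates one for this isolate if necessary, and disabling deletes
// it and resets code_flusher_ to NULL.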
Steve Block44f0eee2011-05-26 01:26:41 +01003350void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3351 if (enable) {
3352 if (code_flusher_ != NULL) return;
Ben Murdoch8b112d22011-06-08 16:22:53 +01003353 code_flusher_ = new CodeFlusher(heap()->isolate());
Steve Block44f0eee2011-05-26 01:26:41 +01003354 } else {
3355 if (code_flusher_ == NULL) return;
3356 delete code_flusher_;
3357 code_flusher_ = NULL;
3358 }
3359}
3360
3361
Ben Murdoch8b112d22011-06-08 16:22:53 +01003362void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3363 Isolate* isolate) {
Ben Murdochb8e0da22011-05-16 14:20:40 +01003364#ifdef ENABLE_GDB_JIT_INTERFACE
3365 if (obj->IsCode()) {
3366 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3367 }
3368#endif
Leon Clarked91b9f72010-01-27 17:25:45 +00003369 if (obj->IsCode()) {
Ben Murdoch8b112d22011-06-08 16:22:53 +01003370 PROFILE(isolate, CodeDeleteEvent(obj->address()));
Leon Clarked91b9f72010-01-27 17:25:45 +00003371 }
Leon Clarked91b9f72010-01-27 17:25:45 +00003372}
3373
Iain Merrick75681382010-08-19 15:07:18 +01003374
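// Computes the size of an object whose map word still carries the mark bit:
// the mark is cleared on a copy of the map word, so the object itself stays
// marked.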
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08003375int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
3376 MapWord map_word = obj->map_word();
3377 map_word.ClearMark();
3378 return obj->SizeFromMap(map_word.ToMap());
3379}
3380
3381
Iain Merrick75681382010-08-19 15:07:18 +01003382void MarkCompactCollector::Initialize() {
3383 StaticPointersToNewGenUpdatingVisitor::Initialize();
3384 StaticMarkingVisitor::Initialize();
3385}
3386
3387
Steve Blocka7e24c12009-10-30 11:49:00 +00003388} } // namespace v8::internal