blob: 096135051ed391bc5f5b7f06c25f54d456776dcb [file] [log] [blame]
vegorov@chromium.org7304bca2011-05-16 12:14:13 +00001// Copyright 2011 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#include "v8.h"
29
ricow@chromium.org0b9f8502010-08-18 07:45:01 +000030#include "compilation-cache.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000031#include "execution.h"
ricow@chromium.org4980dff2010-07-19 08:33:45 +000032#include "heap-profiler.h"
erik.corry@gmail.com0511e242011-01-19 11:11:08 +000033#include "gdb-jit.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000034#include "global-handles.h"
35#include "ic-inl.h"
ager@chromium.org0ee099b2011-01-25 14:06:47 +000036#include "liveobjectlist-inl.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000037#include "mark-compact.h"
ager@chromium.orgea4f62e2010-08-16 16:28:43 +000038#include "objects-visiting.h"
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000039#include "stub-cache.h"
40
kasperl@chromium.org71affb52009-05-26 05:44:31 +000041namespace v8 {
42namespace internal {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000043
ager@chromium.orgddb913d2009-01-27 10:01:48 +000044// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000045// MarkCompactCollector
46
// Constructs an idle collector. All per-collection state (tracer, flags,
// heap) is assigned later: heap_ by the owning heap, tracer_ in Prepare(),
// and code_flusher_ lazily when code flushing is enabled.
// NOTE: initializer order must match the member declaration order.
MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      force_compaction_(false),
      compacting_collection_(false),
      compact_on_next_gc_(false),
      previous_marked_count_(0),
      tracer_(NULL),
#ifdef DEBUG
      // Per-space live-object accounting, maintained only in debug builds.
      live_young_objects_size_(0),
      live_old_pointer_objects_size_(0),
      live_old_data_objects_size_(0),
      live_code_objects_size_(0),
      live_map_objects_size_(0),
      live_cell_objects_size_(0),
      live_lo_objects_size_(0),
      live_bytes_(0),
#endif
      heap_(NULL),
      code_flusher_(NULL),
      // Sentinel for the "no weak maps encountered yet" list head; marking
      // compares against Smi::FromInt(0) (see CollectGarbage's ASSERT).
      encountered_weak_maps_(NULL) { }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000069
ager@chromium.orgea4f62e2010-08-16 16:28:43 +000070
// Runs one full collection: mark live objects, clear dead map transitions
// and weak-map entries, then either compact (encode forwarding addresses,
// update pointers, relocate) or simply sweep. The phases below are strictly
// order-dependent; do not reorder them.
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  // The weak-map list must have been fully processed (reset to the
  // Smi::FromInt(0) sentinel) by the previous collection.
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  // Prepare has selected whether to compact the old generation or not.
  // Tell the tracer.
  if (IsCompacting()) tracer_->set_is_compacting();

  MarkLiveObjects();

  if (FLAG_collect_maps) ClearNonLiveTransitions();

  ClearWeakMaps();

  SweepLargeObjectSpace();

  if (IsCompacting()) {
    GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
    EncodeForwardingAddresses();

    // While forwarding addresses are encoded, map pointers are not normal
    // tagged pointers; flag that so heap iteration decodes them correctly.
    heap()->MarkMapPointersAsEncoded(true);
    UpdatePointers();
    heap()->MarkMapPointersAsEncoded(false);
    // Cached pc->code mappings are stale after objects move.
    heap()->isolate()->pc_to_code_cache()->Flush();

    RelocateObjects();
  } else {
    SweepSpaces();
    heap()->isolate()->pc_to_code_cache()->Flush();
  }

  Finish();

  // Save the count of marked objects remaining after the collection and
  // null out the GC tracer.
  previous_marked_count_ = tracer_->marked_count();
  ASSERT(previous_marked_count_ == 0);
  tracer_ = NULL;
}
112
113
// Decides whether this collection will compact the old generation, notifies
// each paged space, and resets debug accounting. Must be called before
// CollectGarbage(); transitions state_ from IDLE to PREPARE_GC.
void MarkCompactCollector::Prepare(GCTracer* tracer) {
  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif
  // The two flags are mutually exclusive; honoring both is impossible.
  ASSERT(!FLAG_always_compact || !FLAG_never_compact);

  // Compact if forced by flag/caller, or if the previous Finish() decided
  // the old generation is too fragmented (compact_on_next_gc_).
  compacting_collection_ =
      FLAG_always_compact || force_compaction_ || compact_on_next_gc_;
  compact_on_next_gc_ = false;

  if (FLAG_never_compact) compacting_collection_ = false;
  // Compaction encodes forwarding data in map pointers; bail out if the map
  // space layout makes that encoding impossible.
  if (!heap()->map_space()->MapPointersEncodable())
    compacting_collection_ = false;
  if (FLAG_collect_maps) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If GDBJIT interface is active disable compaction.
    compacting_collection_ = false;
  }
#endif

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL; space = spaces.next()) {
    space->PrepareForMarkCompact(compacting_collection_);
  }

#ifdef DEBUG
  live_bytes_ = 0;
  live_young_objects_size_ = 0;
  live_old_pointer_objects_size_ = 0;
  live_old_data_objects_size_ = 0;
  live_code_objects_size_ = 0;
  live_map_objects_size_ = 0;
  live_cell_objects_size_ = 0;
  live_lo_objects_size_ = 0;
#endif
}
157
158
159void MarkCompactCollector::Finish() {
160#ifdef DEBUG
ricow@chromium.org30ce4112010-05-31 10:38:25 +0000161 ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000162 state_ = IDLE;
163#endif
164 // The stub cache is not traversed during GC; clear the cache to
165 // force lazy re-initialization of it. This must be done after the
166 // GC, because it relies on the new address of certain old space
167 // objects (empty string, illegal builtin).
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +0000168 heap()->isolate()->stub_cache()->Clear();
ager@chromium.orga1645e22009-09-09 19:27:10 +0000169
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +0000170 heap()->external_string_table_.CleanUp();
kmillikin@chromium.org13bd2942009-12-16 15:36:05 +0000171
ager@chromium.orga1645e22009-09-09 19:27:10 +0000172 // If we've just compacted old space there's no reason to check the
173 // fragmentation limit. Just return.
174 if (HasCompacted()) return;
175
176 // We compact the old generation on the next GC if it has gotten too
177 // fragmented (ie, we could recover an expected amount of space by
178 // reclaiming the waste and free list blocks).
179 static const int kFragmentationLimit = 15; // Percent.
180 static const int kFragmentationAllowed = 1 * MB; // Absolute.
kmillikin@chromium.orgf05f2912010-09-30 10:07:24 +0000181 intptr_t old_gen_recoverable = 0;
182 intptr_t old_gen_used = 0;
ager@chromium.orga1645e22009-09-09 19:27:10 +0000183
184 OldSpaces spaces;
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +0000185 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
ager@chromium.orga1645e22009-09-09 19:27:10 +0000186 old_gen_recoverable += space->Waste() + space->AvailableFree();
187 old_gen_used += space->Size();
188 }
189
190 int old_gen_fragmentation =
191 static_cast<int>((old_gen_recoverable * 100.0) / old_gen_used);
192 if (old_gen_fragmentation > kFragmentationLimit &&
193 old_gen_recoverable > kFragmentationAllowed) {
194 compact_on_next_gc_ = true;
195 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000196}
197
198
ager@chromium.orgddb913d2009-01-27 10:01:48 +0000199// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000200// Phase 1: tracing and marking live objects.
201// before: all objects are in normal state.
202// after: a live object's map pointer is marked as '00'.
203
204// Marking all live objects in the heap as part of mark-sweep or mark-compact
205// collection. Before marking, all objects are in their normal state. After
206// marking, live objects' map pointers are marked indicating that the object
207// has been found reachable.
208//
209// The marking algorithm is a (mostly) depth-first (because of possible stack
210// overflow) traversal of the graph of objects reachable from the roots. It
211// uses an explicit stack of pointers rather than recursion. The young
212// generation's inactive ('from') space is used as a marking stack. The
213// objects in the marking stack are the ones that have been reached and marked
214// but their children have not yet been visited.
215//
216// The marking stack can overflow during traversal. In that case, we set an
217// overflow flag. When the overflow flag is set, we continue marking objects
218// reachable from the objects on the marking stack, but no longer push them on
219// the marking stack. Instead, we mark them as both marked and overflowed.
220// When the stack is in the overflowed state, objects marked as overflowed
221// have been reached and marked but their children have not been visited yet.
222// After emptying the marking stack, we clear the overflow flag and traverse
223// the heap looking for objects marked as overflowed, push them on the stack,
224// and continue with marking. This process repeats until all reachable
225// objects have been marked.
226
// Collects JSFunction and SharedFunctionInfo objects whose compiled code may
// be discarded ("flushed") and replaced with the lazy-compile builtin when
// the code itself is not marked live. Candidates are threaded into two
// intrusive singly linked lists: the JSFunction list reuses the object's
// code-entry field as the "next" link, and the SharedFunctionInfo list
// stores the link inside the candidate's Code object (see the
// GetNextCandidateField helpers) — so no extra allocation is needed during
// marking.
class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  // Prepends |shared_info| to the SharedFunctionInfo candidate list.
  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }

  // Prepends |function| to the JSFunction candidate list. The function's
  // code must still agree with its shared info's code, since the code-entry
  // field is about to be overwritten with the list link.
  void AddCandidate(JSFunction* function) {
    ASSERT(function->unchecked_code() ==
           function->unchecked_shared()->unchecked_code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }

  // Flushes unmarked code from all enqueued candidates and empties both
  // lists. Shared infos are processed first so that the JSFunction pass
  // observes their (possibly already flushed) code.
  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

 private:
  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      // Read the link before set_code() below clobbers the code-entry field
      // that stores it.
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->unchecked_shared();

      Code* code = shared->unchecked_code();
      if (!code->IsMarked()) {
        // Code is dead: make both the shared info and the function fall
        // back to lazy compilation.
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
      } else {
        // Code survives: restore the function's real code entry (it was
        // repurposed as the candidate-list link).
        candidate->set_code(shared->unchecked_code());
      }

      candidate = next_candidate;
    }

    jsfunction_candidates_head_ = NULL;
  }


  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      // Clear the link stored in the candidate's Code object so it does not
      // leak into a live code object.
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->unchecked_code();
      if (!code->IsMarked()) {
        candidate->set_code(lazy_compile);
      }

      candidate = next_candidate;
    }

    shared_function_info_candidates_head_ = NULL;
  }

  // The JSFunction "next candidate" link lives in the code-entry slot.
  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  // The SharedFunctionInfo "next candidate" link lives in a dedicated slot
  // of the candidate's Code object.
  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->unchecked_code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kNextCodeFlushingCandidateOffset);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};
334
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000335
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000336MarkCompactCollector::~MarkCompactCollector() {
337 if (code_flusher_ != NULL) {
338 delete code_flusher_;
339 code_flusher_ = NULL;
340 }
341}
342
mads.s.ager31e71382008-08-13 09:32:07 +0000343
// Optimization applied while marking: collapse a cons string whose right
// part is the empty string down to its left part, updating *p in place.
// Returns the (possibly replaced) heap object for further marking.
static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (ie, the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  // The map pointer may carry the mark bit; clear it before reading the
  // instance type.
  MapWord map_word = object->map_word();
  map_word.ClearMark();
  InstanceType type = map_word.ToMap()->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map_word.ToMap()->heap();
  if (second != heap->raw_unchecked_empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}
378
379
ager@chromium.orgea4f62e2010-08-16 16:28:43 +0000380class StaticMarkingVisitor : public StaticVisitorBase {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000381 public:
  // Dispatches body iteration to the visitor callback registered for
  // |map|'s visitor id in the static table below.
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }
385
  // Populates the static visitor dispatch table, mapping each visitor id to
  // the marking routine for that object layout. Must run once before any
  // call to IterateBody().
  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);


    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    // Double arrays hold raw numbers only — no pointers to mark.
    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);

    table_.Register(kVisitGlobalContext,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Context::MarkCompactBodyDescriptor,
                                      void>::Visit);

    // Pure-data objects: nothing to visit.
    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    // Weak maps get custom treatment: their backing table is marked but its
    // entries are kept weak (see VisitJSWeakMap).
    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    // Functions and shared infos participate in code flushing.
    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitJSRegExp,
                    &VisitRegExpAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }
453
  // Marks the single object referenced through |p| (if it is a heap object).
  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    // For large ranges, try the recursive fast path first; it bails out
    // (returns false) near the stack limit, in which case we fall through
    // to plain marking that pushes children on the marking stack instead.
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    for (Object** p = start; p < end; p++) MarkObjectByPointer(heap, p);
  }
467
  // Visits a CODE_TARGET reloc entry. Inline cache stubs may be cleared
  // instead of marked when --cleanup-code-caches-at-gc is on.
  static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Code* code = Code::GetCodeFromTargetAddress(rinfo->target_address());
    if (FLAG_cleanup_code_caches_at_gc && code->is_inline_cache_stub()) {
      IC::Clear(rinfo->pc());
      // Please note targets for cleared inline cached do not have to be
      // marked since they are contained in HEAP->non_monomorphic_cache().
    } else {
      heap->mark_compact_collector()->MarkObject(code);
    }
  }
479
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +0000480 static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) {
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000481 ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL);
482 Object* cell = rinfo->target_cell();
483 Object* old_cell = cell;
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +0000484 VisitPointer(heap, &cell);
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000485 if (cell != old_cell) {
486 rinfo->set_target_cell(reinterpret_cast<JSGlobalPropertyCell*>(cell));
487 }
488 }
489
  // Visits the code target of a patched return sequence or debug break
  // slot, keeping the debug break code alive.
  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    HeapObject* code = Code::GetCodeFromTargetAddress(rinfo->call_address());
    heap->mark_compact_collector()->MarkObject(code);
  }
498
  // Mark object pointed to by p. Smis are ignored; cons strings with an
  // empty right part are short-circuited (which may rewrite *p) before
  // marking.
  INLINE(static void MarkObjectByPointer(Heap* heap, Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    if (!object->IsMarked()) {
      heap->mark_compact_collector()->MarkUnmarkedObject(object);
    }
  }
507
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000508
  // Visit an unmarked object: set its mark, mark its map, and recurse into
  // its body directly (fast path used by VisitUnmarkedObjects when stack
  // space permits).
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!obj->IsMarked());
#endif
    Map* map = obj->map();
    collector->SetMark(obj);
    // Mark the map pointer and the body.
    if (!map->IsMarked()) collector->MarkUnmarkedObject(map);
    IterateBody(map, obj);
  }
522
  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      if (!(*p)->IsHeapObject()) continue;
      HeapObject* obj = HeapObject::cast(*p);
      if (obj->IsMarked()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }
ager@chromium.orgea4f62e2010-08-16 16:28:43 +0000542
  // External references and runtime entries point outside the heap, so
  // there is nothing to mark.
  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
545
546 private:
  // Visitor for objects that contain no pointers: intentionally does
  // nothing in both the sized and generic variants.
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };
556
  // Generic body visitors for JS objects and struct-like objects, driven by
  // their respective body descriptors.
  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;
564
  // Visits a JSWeakMap: marks everything except the key/value entries in
  // its backing hash table, which stay weak and are processed later by
  // ClearWeakMaps() via the collector's weak-map list.
  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->heap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    ASSERT(weak_map->next() == Smi::FromInt(0));
    weak_map->set_next(collector->encountered_weak_maps());
    collector->set_encountered_weak_maps(weak_map);

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->heap(),
        object,
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->heap(),
        object,
        JSWeakMap::kTableOffset + kPointerSize,
        object_size);

    // Mark the backing hash table without pushing it on the marking stack.
    ASSERT(!weak_map->unchecked_table()->IsMarked());
    ASSERT(weak_map->unchecked_table()->map()->IsMarked());
    collector->SetMark(weak_map->unchecked_table());
  }
592
  // Visits a Code object by delegating to Code's own body iteration, which
  // walks its relocation info (code targets, cells, debug targets).
  static void VisitCode(Map* map, HeapObject* object) {
    reinterpret_cast<Code*>(object)->CodeIterateBody<StaticMarkingVisitor>(
        map->heap());
  }
597
  // Code flushing support.

  // How many collections newly compiled code object will survive before being
  // flushed.
  static const int kCodeAgeThreshold = 5;

  // Analogous survival threshold for compiled regexp code.
  static const int kRegExpCodeThreshold = 5;
605
  // Returns true if |info| has both a script and that script's source,
  // i.e. the function could be recompiled after its code is flushed.
  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
    Object* undefined = heap->raw_unchecked_undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }
611
612
  // A function (or shared info) counts as compiled when its code is
  // anything other than the lazy-compile builtin.
  inline static bool IsCompiled(JSFunction* function) {
    return function->unchecked_code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->unchecked_code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }
622
  // Decides whether |function|'s code may be flushed.  Marked code is in use
  // (resets the shared info's age); functions whose code differs from their
  // shared code are optimized and never flushed.  Otherwise defers to the
  // SharedFunctionInfo overload below.
  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    if (function->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->unchecked_code()) {
      return false;
    }

    return IsFlushable(heap, shared_info);
  }
640
  // Decides whether |shared_info|'s code may be flushed.  Also ages the
  // shared info as a side effect: only after the code survives unused for
  // kCodeAgeThreshold collections does this return true.
  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
    // Code is either on stack, in compilation cache or referenced
    // by optimized version of function.
    if (shared_info->unchecked_code()->IsMarked()) {
      shared_info->set_code_age(0);
      return false;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
      return false;
    }

    // We never flush code for Api functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsHeapObject() &&
        (SafeMap(function_data)->instance_type() ==
         FUNCTION_TEMPLATE_INFO_TYPE)) {
      return false;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) return false;

    // Function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) return false;

    // If this is a full script wrapped in a function we do not flush the code.
    if (shared_info->is_toplevel()) return false;

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return false;
    }

    return true;
  }
680
681
  // Registers |function| as a code-flush candidate if IsFlushable agrees.
  // Returns true when the function was added as a candidate.
  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(heap, function)) return false;

    // This function's code looks flushable. But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
    return true;
  }
693
694
695 static inline Map* SafeMap(Object* obj) {
696 MapWord map_word = HeapObject::cast(obj)->map_word();
697 map_word.ClearMark();
698 map_word.ClearOverflow();
699 return map_word.ToMap();
700 }
701
702
703 static inline bool IsJSBuiltinsObject(Object* obj) {
704 return obj->IsHeapObject() &&
705 (SafeMap(obj)->instance_type() == JS_BUILTINS_OBJECT_TYPE);
706 }
707
708
  // Returns true when |ctx| is a function/catch/with/global context whose
  // global object is not the builtins object.  Uses SafeMap so the check
  // works on already-marked heap objects.
  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    if (!ctx->IsHeapObject()) return false;

    Map* map = SafeMap(ctx);
    Heap* heap = map->heap();
    if (!(map == heap->raw_unchecked_function_context_map() ||
          map == heap->raw_unchecked_catch_context_map() ||
          map == heap->raw_unchecked_with_context_map() ||
          map == heap->raw_unchecked_global_context_map())) {
      return false;
    }

    Context* context = reinterpret_cast<Context*>(ctx);

    // Builtin functions are never flush candidates.
    if (IsJSBuiltinsObject(context->global())) {
      return false;
    }

    return true;
  }
729
730
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000731 static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +0000732 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000733
734 if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();
735
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +0000736 FixedBodyVisitor<StaticMarkingVisitor,
737 SharedFunctionInfo::BodyDescriptor,
738 void>::Visit(map, object);
739 }
740
741
  // Ages or flushes the (ascii or two-byte) code of one JSRegExp.  Compiled
  // code is replaced by the current sweep generation (a smi) while a backup
  // copy is kept in the saved-code slot; if the stored generation is
  // kRegExpCodeThreshold sweeps old, both slots are reset to uninitialized.
  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (SafeMap(re->data())->instance_type() != FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() && SafeMap(code)->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                             code,
                             heap);
      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
                             heap);
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
      }
    }
  }
781
782
  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp and keeps a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    // With flushing disabled this is an ordinary JSRegExp field visit.
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExpFields(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ascii and two byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExpFields(map, object);
  }
802
803
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000804 static void VisitSharedFunctionInfoAndFlushCode(Map* map,
805 HeapObject* object) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000806 MarkCompactCollector* collector = map->heap()->mark_compact_collector();
807 if (!collector->is_code_flushing_enabled()) {
808 VisitSharedFunctionInfoGeneric(map, object);
809 return;
810 }
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000811 VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
812 }
813
814
  // Visits a SharedFunctionInfo and, when flushable, registers it as a
  // code-flush candidate.  |known_flush_code_candidate| lets the caller
  // (VisitJSFunctionFields) skip the IsFlushable check it already performed.
  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
      Map* map, HeapObject* object, bool known_flush_code_candidate) {
    Heap* heap = map->heap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    // Detach the initial map before marking (see the generic visitor).
    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    if (!known_flush_code_candidate) {
      known_flush_code_candidate = IsFlushable(heap, shared);
      if (known_flush_code_candidate) {
        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
      }
    }

    // Candidates skip their code field so the code object can be flushed.
    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
  }
831
832
  // Visits the code object referenced by the entry address at
  // |entry_address|.  If VisitPointer updated the slot (replaced the code
  // object), the stored entry address is rewritten to the new code's entry.
  static void VisitCodeEntry(Heap* heap, Address entry_address) {
    Object* code = Code::GetObjectFromEntryAddress(entry_address);
    Object* old_code = code;
    VisitPointer(heap, &code);
    if (code != old_code) {
      Memory::Address_at(entry_address) =
          reinterpret_cast<Code*>(code)->entry();
    }
  }
ricow@chromium.org0b9f8502010-08-18 07:45:01 +0000842
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000843
  // JSFunction visitor used while code flushing is enabled.  Decides whether
  // this function's code may be flushed; if not, explicitly marks the shared
  // code and, for optimized code, the non-optimized code of all functions
  // inlined into it.
  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSFunction(map, object);
      return;
    }

    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    bool flush_code_candidate = false;
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
    }

    if (!flush_code_candidate) {
      collector->MarkObject(jsfunction->unchecked_shared()->unchecked_code());

      if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
        // For optimized functions we should retain both non-optimized version
        // of its code and non-optimized version of all inlined functions.
        // This is required to support bailing out from inlined code.
        DeoptimizationInputData* data =
            reinterpret_cast<DeoptimizationInputData*>(
                jsfunction->unchecked_code()->unchecked_deoptimization_data());

        FixedArray* literals = data->UncheckedLiteralArray();

        for (int i = 0, count = data->InlinedFunctionCount()->value();
             i < count;
             i++) {
          JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
          collector->MarkObject(inlined->unchecked_shared()->unchecked_code());
        }
      }
    }

    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          flush_code_candidate);
  }
885
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000886
887 static void VisitJSFunction(Map* map, HeapObject* object) {
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000888 VisitJSFunctionFields(map,
889 reinterpret_cast<JSFunction*>(object),
890 false);
891 }
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000892
kasperl@chromium.orga5551262010-12-07 12:49:48 +0000893
// Address of the pointer slot at |offset| inside |obj|.
#define SLOT_ADDR(obj, offset) \
  reinterpret_cast<Object**>((obj)->address() + offset)


  // Visits a JSFunction's pointer fields.  The code entry is skipped for
  // flush candidates, and the weak next-function-link field is never
  // visited.
  static inline void VisitJSFunctionFields(Map* map,
                                           JSFunction* object,
                                           bool flush_code_candidate) {
    Heap* heap = map->heap();
    MarkCompactCollector* collector = heap->mark_compact_collector();

    // Fields before the code entry.
    VisitPointers(heap,
                  SLOT_ADDR(object, JSFunction::kPropertiesOffset),
                  SLOT_ADDR(object, JSFunction::kCodeEntryOffset));

    if (!flush_code_candidate) {
      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    } else {
      // Don't visit code object.

      // Visit shared function info to avoid double checking of its
      // flushability.
      SharedFunctionInfo* shared_info = object->unchecked_shared();
      if (!shared_info->IsMarked()) {
        Map* shared_info_map = shared_info->map();
        collector->SetMark(shared_info);
        collector->MarkObject(shared_info_map);
        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                   shared_info,
                                                   true);
      }
    }

    // Fields after the code entry, up to (but excluding) the weak fields.
    VisitPointers(heap,
                  SLOT_ADDR(object,
                            JSFunction::kCodeEntryOffset + kPointerSize),
                  SLOT_ADDR(object, JSFunction::kNonWeakFieldsEndOffset));

    // Don't visit the next function list field as it is a weak reference.
  }
933
  // Visits all pointer fields of a JSRegExp, including in-object properties.
  static inline void VisitJSRegExpFields(Map* map,
                                         HeapObject* object) {
    int last_property_offset =
        JSRegExp::kSize + kPointerSize * map->inobject_properties();
    VisitPointers(map->heap(),
                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                  SLOT_ADDR(object, last_property_offset));
  }
942
erik.corry@gmail.com145eff52010-08-23 11:36:18 +0000943
  // Visits a SharedFunctionInfo's pointer fields.  The code field is skipped
  // for flush candidates so the code object is allowed to die.
  static void VisitSharedFunctionInfoFields(Heap* heap,
                                            HeapObject* object,
                                            bool flush_code_candidate) {
    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));

    if (!flush_code_candidate) {
      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
    }

    VisitPointers(heap,
                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
  }

 #undef SLOT_ADDR
959
  // Signature shared by all static visit functions in this class.
  typedef void (*Callback)(Map* map, HeapObject* object);

  // Dispatch table of visit callbacks; populated outside this chunk.
  static VisitorDispatchTable<Callback> table_;
963};
964
965
// Storage for StaticMarkingVisitor's static dispatch table.
VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;
968
969
970class MarkingVisitor : public ObjectVisitor {
971 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000972 explicit MarkingVisitor(Heap* heap) : heap_(heap) { }
973
ager@chromium.orgea4f62e2010-08-16 16:28:43 +0000974 void VisitPointer(Object** p) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000975 StaticMarkingVisitor::VisitPointer(heap_, p);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +0000976 }
977
978 void VisitPointers(Object** start, Object** end) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000979 StaticMarkingVisitor::VisitPointers(heap_, start, end);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +0000980 }
981
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000982 private:
983 Heap* heap_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000984};
985
986
ricow@chromium.org0b9f8502010-08-18 07:45:01 +0000987class CodeMarkingVisitor : public ThreadVisitor {
988 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000989 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
990 : collector_(collector) {}
991
vegorov@chromium.org74f333b2011-04-06 11:17:46 +0000992 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
993 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000994 collector_->MarkObject(it.frame()->unchecked_code());
ricow@chromium.org0b9f8502010-08-18 07:45:01 +0000995 }
996 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +0000997
998 private:
999 MarkCompactCollector* collector_;
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001000};
1001
1002
1003class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1004 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001005 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1006 : collector_(collector) {}
1007
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001008 void VisitPointers(Object** start, Object** end) {
1009 for (Object** p = start; p < end; p++) VisitPointer(p);
1010 }
1011
1012 void VisitPointer(Object** slot) {
1013 Object* obj = *slot;
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001014 if (obj->IsSharedFunctionInfo()) {
1015 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001016 collector_->MarkObject(shared->unchecked_code());
1017 collector_->MarkObject(shared);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001018 }
1019 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001020
1021 private:
1022 MarkCompactCollector* collector_;
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001023};
1024
1025
// Decides whether code flushing is enabled for this collection and, if so,
// pre-marks all code that must survive: code on the current stack, on
// archived thread stacks, and code reachable from the compilation cache and
// handle scopes.
void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap() == Isolate::Current()->heap());

  if (!FLAG_flush_code) {
    EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  // Code flushing would invalidate debugger state (breakpoints live in code).
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    EnableCodeFlushing(false);
    return;
  }
#endif
  EnableCodeFlushing(true);

  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  MarkObject(heap()->raw_unchecked_empty_descriptor_array());

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  for (StackFrameIterator it; !it.done(); it.Advance()) {
    MarkObject(it.frame()->unchecked_code());
  }

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  // Drain the marking stack so everything reachable from the above is marked.
  ProcessMarkingStack();
}
1065
1066
mads.s.ager31e71382008-08-13 09:32:07 +00001067// Visitor class for marking heap roots.
1068class RootMarkingVisitor : public ObjectVisitor {
1069 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001070 explicit RootMarkingVisitor(Heap* heap)
1071 : collector_(heap->mark_compact_collector()) { }
1072
mads.s.ager31e71382008-08-13 09:32:07 +00001073 void VisitPointer(Object** p) {
1074 MarkObjectByPointer(p);
1075 }
1076
1077 void VisitPointers(Object** start, Object** end) {
1078 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1079 }
1080
mads.s.ager31e71382008-08-13 09:32:07 +00001081 private:
mads.s.ager31e71382008-08-13 09:32:07 +00001082 void MarkObjectByPointer(Object** p) {
1083 if (!(*p)->IsHeapObject()) return;
1084
1085 // Replace flat cons strings in place.
1086 HeapObject* object = ShortCircuitConsString(p);
1087 if (object->IsMarked()) return;
1088
mads.s.ager31e71382008-08-13 09:32:07 +00001089 Map* map = object->map();
1090 // Mark the object.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001091 collector_->SetMark(object);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001092
mads.s.ager31e71382008-08-13 09:32:07 +00001093 // Mark the map pointer and body, and push them on the marking stack.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001094 collector_->MarkObject(map);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001095 StaticMarkingVisitor::IterateBody(map, object);
mads.s.ager31e71382008-08-13 09:32:07 +00001096
1097 // Mark all the objects reachable from the map and body. May leave
1098 // overflowed objects in the heap.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001099 collector_->EmptyMarkingStack();
mads.s.ager31e71382008-08-13 09:32:07 +00001100 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001101
1102 MarkCompactCollector* collector_;
mads.s.ager31e71382008-08-13 09:32:07 +00001103};
1104
1105
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001106// Helper class for pruning the symbol table.
1107class SymbolTableCleaner : public ObjectVisitor {
1108 public:
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001109 explicit SymbolTableCleaner(Heap* heap)
1110 : heap_(heap), pointers_removed_(0) { }
kmillikin@chromium.org13bd2942009-12-16 15:36:05 +00001111
1112 virtual void VisitPointers(Object** start, Object** end) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001113 // Visit all HeapObject pointers in [start, end).
1114 for (Object** p = start; p < end; p++) {
kasper.lund7276f142008-07-30 08:49:36 +00001115 if ((*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked()) {
ager@chromium.org6f10e412009-02-13 10:11:16 +00001116 // Check if the symbol being pruned is an external symbol. We need to
1117 // delete the associated external data as this symbol is going away.
1118
ager@chromium.org6f10e412009-02-13 10:11:16 +00001119 // Since no objects have yet been moved we can safely access the map of
1120 // the object.
kmillikin@chromium.org13bd2942009-12-16 15:36:05 +00001121 if ((*p)->IsExternalString()) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001122 heap_->FinalizeExternalString(String::cast(*p));
ager@chromium.org6f10e412009-02-13 10:11:16 +00001123 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001124 // Set the entry to null_value (as deleted).
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001125 *p = heap_->raw_unchecked_null_value();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001126 pointers_removed_++;
1127 }
1128 }
1129 }
1130
1131 int PointersRemoved() {
1132 return pointers_removed_;
1133 }
jkummerow@chromium.orge297f592011-06-08 10:05:15 +00001134
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001135 private:
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001136 Heap* heap_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001137 int pointers_removed_;
1138};
1139
1140
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00001141// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1142// are retained.
1143class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1144 public:
1145 virtual Object* RetainAs(Object* object) {
1146 MapWord first_word = HeapObject::cast(object)->map_word();
1147 if (first_word.IsMarked()) {
1148 return object;
1149 } else {
1150 return NULL;
1151 }
1152 }
1153};
1154
1155
mads.s.ager31e71382008-08-13 09:32:07 +00001156void MarkCompactCollector::MarkUnmarkedObject(HeapObject* object) {
mads.s.ager31e71382008-08-13 09:32:07 +00001157 ASSERT(!object->IsMarked());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001158 ASSERT(HEAP->Contains(object));
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001159 if (object->IsMap()) {
1160 Map* map = Map::cast(object);
ager@chromium.orgea91cc52011-05-23 06:06:11 +00001161 if (FLAG_cleanup_code_caches_at_gc) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001162 map->ClearCodeCache(heap());
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001163 }
ager@chromium.org3bf7b912008-11-17 09:09:45 +00001164 SetMark(map);
ricow@chromium.orgc54d3652011-05-30 09:20:16 +00001165
1166 // When map collection is enabled we have to mark through map's transitions
1167 // in a special way to make transition links weak.
ricow@chromium.orgd2be9012011-06-01 06:00:58 +00001168 // Only maps for subclasses of JSReceiver can have transitions.
1169 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1170 if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001171 MarkMapContents(map);
1172 } else {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001173 marking_stack_.Push(map);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001174 }
1175 } else {
ager@chromium.org3bf7b912008-11-17 09:09:45 +00001176 SetMark(object);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001177 marking_stack_.Push(object);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001178 }
1179}
1180
1181
// Marks the contents of a map so that its transition-related references
// remain weak: the prototype transitions array is marked but not pushed,
// descriptor arrays go through MarkDescriptorArray, and the remaining
// pointer fields are visited normally.
void MarkCompactCollector::MarkMapContents(Map* map) {
  // Mark prototype transitions array but don't push it into marking stack.
  // This will make references from it weak. We will clean dead prototype
  // transitions in ClearNonLiveTransitions.
  FixedArray* prototype_transitions = map->unchecked_prototype_transitions();
  if (!prototype_transitions->IsMarked()) SetMark(prototype_transitions);

  // The slot holds either a DescriptorArray or (as a smi) bit field 3.
  Object* raw_descriptor_array =
      *HeapObject::RawField(map,
                            Map::kInstanceDescriptorsOrBitField3Offset);
  if (!raw_descriptor_array->IsSmi()) {
    MarkDescriptorArray(
        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
  }

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(map->heap(), start_slot, end_slot);
}
1207
1208
// Marks a descriptor array and its contents array, treating transition and
// null descriptors as weak (their values are not marked).
void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  if (descriptors->IsMarked()) return;
  // Empty descriptor array is marked as a root before any maps are marked.
  ASSERT(descriptors != HEAP->raw_unchecked_empty_descriptor_array());
  SetMark(descriptors);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!contents->IsMarked());
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  SetMark(contents);
  // Contents contains (value, details) pairs. If the details say that the type
  // of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value as
  // live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
  // CONSTANT_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));
    if (details.type() < FIRST_PHANTOM_PROPERTY_TYPE) {
      HeapObject* object = reinterpret_cast<HeapObject*>(contents->get(i));
      if (object->IsHeapObject() && !object->IsMarked()) {
        SetMark(object);
        marking_stack_.Push(object);
      }
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents array,
  // but the contents array is already marked.
  marking_stack_.Push(descriptors);
}
1244
1245
1246void MarkCompactCollector::CreateBackPointers() {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001247 HeapObjectIterator iterator(heap()->map_space());
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00001248 for (HeapObject* next_object = iterator.next();
1249 next_object != NULL; next_object = iterator.next()) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001250 if (next_object->IsMap()) { // Could also be ByteArray on free list.
1251 Map* map = Map::cast(next_object);
ricow@chromium.orgd2be9012011-06-01 06:00:58 +00001252 STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
1253 if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001254 map->CreateBackPointers();
1255 } else {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001256 ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array());
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001257 }
1258 }
1259 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001260}
1261
1262
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001263static int OverflowObjectSize(HeapObject* obj) {
1264 // Recover the normal map pointer, it might be marked as live and
1265 // overflowed.
kasper.lund7276f142008-07-30 08:49:36 +00001266 MapWord map_word = obj->map_word();
1267 map_word.ClearMark();
1268 map_word.ClearOverflow();
1269 return obj->SizeFromMap(map_word.ToMap());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001270}
1271
1272
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001273class OverflowedObjectsScanner : public AllStatic {
1274 public:
1275 // Fill the marking stack with overflowed objects returned by the given
1276 // iterator. Stop when the marking stack is filled or the end of the space
1277 // is reached, whichever comes first.
1278 template<class T>
1279 static inline void ScanOverflowedObjects(MarkCompactCollector* collector,
1280 T* it) {
1281 // The caller should ensure that the marking stack is initially not full,
1282 // so that we don't waste effort pointlessly scanning for objects.
1283 ASSERT(!collector->marking_stack_.is_full());
mads.s.ager31e71382008-08-13 09:32:07 +00001284
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001285 for (HeapObject* object = it->next(); object != NULL; object = it->next()) {
1286 if (object->IsOverflowed()) {
1287 object->ClearOverflow();
1288 ASSERT(object->IsMarked());
1289 ASSERT(HEAP->Contains(object));
1290 collector->marking_stack_.Push(object);
1291 if (collector->marking_stack_.is_full()) return;
1292 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001293 }
1294 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001295};
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001296
1297
ager@chromium.org9085a012009-05-11 19:22:57 +00001298bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1299 return (*p)->IsHeapObject() && !HeapObject::cast(*p)->IsMarked();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001300}
1301
1302
// Marks the symbol table and everything reachable from its prefix.  The
// symbol entries themselves are not marked here; dead ones are pruned later
// by SymbolTableCleaner in MarkLiveObjects.
void MarkCompactCollector::MarkSymbolTable() {
  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
  // Mark the symbol table itself.
  SetMark(symbol_table);
  // Explicitly mark the prefix.
  MarkingVisitor marker(heap());
  symbol_table->IteratePrefix(&marker);
  // Drain everything the prefix marking made reachable.
  ProcessMarkingStack();
}
1312
1313
// Marks all strong roots and the symbol table, then drains any overflow the
// root marking produced so that no overflowed objects remain in the heap.
void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the symbol table specially.
  MarkSymbolTable();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_stack_.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001328
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001329
kasper.lund7276f142008-07-30 08:49:36 +00001330void MarkCompactCollector::MarkObjectGroups() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001331 List<ObjectGroup*>* object_groups =
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001332 heap()->isolate()->global_handles()->object_groups();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001333
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001334 int last = 0;
ager@chromium.org8bb60582008-12-11 12:02:20 +00001335 for (int i = 0; i < object_groups->length(); i++) {
1336 ObjectGroup* entry = object_groups->at(i);
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001337 ASSERT(entry != NULL);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001338
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001339 Object*** objects = entry->objects_;
kasper.lund7276f142008-07-30 08:49:36 +00001340 bool group_marked = false;
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001341 for (size_t j = 0; j < entry->length_; j++) {
kasper.lund7276f142008-07-30 08:49:36 +00001342 Object* object = *objects[j];
1343 if (object->IsHeapObject() && HeapObject::cast(object)->IsMarked()) {
1344 group_marked = true;
1345 break;
1346 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001347 }
1348
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001349 if (!group_marked) {
1350 (*object_groups)[last++] = entry;
1351 continue;
1352 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001353
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001354 // An object in the group is marked, so mark all heap objects in
1355 // the group.
1356 for (size_t j = 0; j < entry->length_; ++j) {
kasper.lund7276f142008-07-30 08:49:36 +00001357 if ((*objects[j])->IsHeapObject()) {
1358 MarkObject(HeapObject::cast(*objects[j]));
1359 }
1360 }
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001361
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001362 // Once the entire group has been marked, dispose it because it's
1363 // not needed anymore.
1364 entry->Dispose();
kasper.lund7276f142008-07-30 08:49:36 +00001365 }
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001366 object_groups->Rewind(last);
kasper.lund7276f142008-07-30 08:49:36 +00001367}
1368
1369
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001370void MarkCompactCollector::MarkImplicitRefGroups() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001371 List<ImplicitRefGroup*>* ref_groups =
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001372 heap()->isolate()->global_handles()->implicit_ref_groups();
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001373
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001374 int last = 0;
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001375 for (int i = 0; i < ref_groups->length(); i++) {
1376 ImplicitRefGroup* entry = ref_groups->at(i);
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001377 ASSERT(entry != NULL);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001378
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001379 if (!(*entry->parent_)->IsMarked()) {
1380 (*ref_groups)[last++] = entry;
1381 continue;
1382 }
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001383
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001384 Object*** children = entry->children_;
1385 // A parent object is marked, so mark all child heap objects.
1386 for (size_t j = 0; j < entry->length_; ++j) {
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001387 if ((*children[j])->IsHeapObject()) {
1388 MarkObject(HeapObject::cast(*children[j]));
1389 }
1390 }
1391
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001392 // Once the entire group has been marked, dispose it because it's
1393 // not needed anymore.
1394 entry->Dispose();
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001395 }
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00001396 ref_groups->Rewind(last);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001397}
1398
1399
// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from
// the marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingStack() {
  // The outer loop re-runs because ProcessWeakMaps below may push newly
  // reachable objects back onto the stack.
  while (!marking_stack_.is_empty()) {
    while (!marking_stack_.is_empty()) {
      HeapObject* object = marking_stack_.Pop();
      ASSERT(object->IsHeapObject());
      ASSERT(heap()->Contains(object));
      ASSERT(object->IsMarked());
      ASSERT(!object->IsOverflowed());

      // Because the object is marked, we have to recover the original map
      // pointer and use it to mark the object's body.
      MapWord map_word = object->map_word();
      map_word.ClearMark();
      Map* map = map_word.ToMap();
      MarkObject(map);

      StaticMarkingVisitor::IterateBody(map, object);
    }

    // Process encountered weak maps, mark objects only reachable by those
    // weak maps and repeat until fix-point is reached.
    ProcessWeakMaps();
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001428
kasper.lund7276f142008-07-30 08:49:36 +00001429
mads.s.ager31e71382008-08-13 09:32:07 +00001430// Sweep the heap for overflowed objects, clear their overflow bits, and
1431// push them on the marking stack. Stop early if the marking stack fills
1432// before sweeping completes. If sweeping completes, there are no remaining
1433// overflowed objects in the heap so the overflow flag on the markings stack
1434// is cleared.
1435void MarkCompactCollector::RefillMarkingStack() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001436 ASSERT(marking_stack_.overflowed());
mads.s.ager31e71382008-08-13 09:32:07 +00001437
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001438 SemiSpaceIterator new_it(heap()->new_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001439 OverflowedObjectsScanner::ScanOverflowedObjects(this, &new_it);
1440 if (marking_stack_.is_full()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001441
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001442 HeapObjectIterator old_pointer_it(heap()->old_pointer_space(),
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001443 &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001444 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_pointer_it);
1445 if (marking_stack_.is_full()) return;
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001446
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001447 HeapObjectIterator old_data_it(heap()->old_data_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001448 OverflowedObjectsScanner::ScanOverflowedObjects(this, &old_data_it);
1449 if (marking_stack_.is_full()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001450
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001451 HeapObjectIterator code_it(heap()->code_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001452 OverflowedObjectsScanner::ScanOverflowedObjects(this, &code_it);
1453 if (marking_stack_.is_full()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001454
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001455 HeapObjectIterator map_it(heap()->map_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001456 OverflowedObjectsScanner::ScanOverflowedObjects(this, &map_it);
1457 if (marking_stack_.is_full()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001458
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001459 HeapObjectIterator cell_it(heap()->cell_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001460 OverflowedObjectsScanner::ScanOverflowedObjects(this, &cell_it);
1461 if (marking_stack_.is_full()) return;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001462
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001463 LargeObjectIterator lo_it(heap()->lo_space(), &OverflowObjectSize);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001464 OverflowedObjectsScanner::ScanOverflowedObjects(this, &lo_it);
1465 if (marking_stack_.is_full()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001466
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001467 marking_stack_.clear_overflowed();
mads.s.ager31e71382008-08-13 09:32:07 +00001468}
1469
1470
1471// Mark all objects reachable (transitively) from objects on the marking
1472// stack. Before: the marking stack contains zero or more heap object
1473// pointers. After: the marking stack is empty and there are no overflowed
1474// objects in the heap.
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001475void MarkCompactCollector::ProcessMarkingStack() {
1476 EmptyMarkingStack();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001477 while (marking_stack_.overflowed()) {
mads.s.ager31e71382008-08-13 09:32:07 +00001478 RefillMarkingStack();
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001479 EmptyMarkingStack();
mads.s.ager31e71382008-08-13 09:32:07 +00001480 }
1481}
1482
1483
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001484void MarkCompactCollector::ProcessExternalMarking() {
kasper.lund7276f142008-07-30 08:49:36 +00001485 bool work_to_do = true;
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001486 ASSERT(marking_stack_.is_empty());
kasper.lund7276f142008-07-30 08:49:36 +00001487 while (work_to_do) {
1488 MarkObjectGroups();
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00001489 MarkImplicitRefGroups();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001490 work_to_do = !marking_stack_.is_empty();
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001491 ProcessMarkingStack();
kasper.lund7276f142008-07-30 08:49:36 +00001492 }
1493}
1494
1495
// Entry point of the marking phase: marks every live object, then performs
// all the post-marking cleanups that depend on mark bits (symbol table
// pruning, weak reference processing, code flushing, profiler cleanup).
// The order of the steps below is significant.
void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system.  JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(heap()->isolate());

#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, the from space is used as a marking
  // stack.
  marking_stack_.Initialize(heap()->new_space()->FromSpaceLow(),
                            heap()->new_space()->FromSpaceHigh());

  ASSERT(!marking_stack_.overflowed());

  PrepareForCodeFlushing();

  RootMarkingVisitor root_visitor(heap());
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked.  Mark objects reachable due to host
  // application specific logic.
  ProcessExternalMarking();

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked.  Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  heap()->isolate()->global_handles()->IdentifyWeakHandles(
      &IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
  while (marking_stack_.overflowed()) {
    RefillMarkingStack();
    EmptyMarkingStack();
  }

  // Repeat host application specific marking to mark unmarked objects
  // reachable from the weak roots.
  ProcessExternalMarking();

  // Object literal map caches reference symbols (cache keys) and maps
  // (cache values).  At this point still useful maps have already been
  // marked.  Mark the keys for the alive values before we process the
  // symbol table.
  ProcessMapCaches();

  // Prune the symbol table removing all symbols only pointed to by the
  // symbol table.  Cannot use symbol_table() here because the symbol
  // table is marked.
  SymbolTable* symbol_table = heap()->raw_unchecked_symbol_table();
  SymbolTableCleaner v(heap());
  symbol_table->IterateElements(&v);
  symbol_table->ElementsRemoved(v.PointersRemoved());
  // External strings are cleaned up with the same visitor so their
  // resources can be released when the string died.
  heap()->external_string_table_.Iterate(&v);
  heap()->external_string_table_.CleanUp();

  // Process the weak references.
  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
  heap()->ProcessWeakReferences(&mark_compact_object_retainer);

  // Remove object groups after marking phase.
  heap()->isolate()->global_handles()->RemoveObjectGroups();
  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();

  // Flush code from collected candidates.
  if (is_code_flushing_enabled()) {
    code_flusher_->ProcessCandidates();
  }

  // Clean up dead objects from the runtime profiler.
  heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
}
1575
1576
// Prunes dead entries from the per-context object-literal map caches.
// Entries whose cached map died are nulled out; caches that end up empty
// are dropped from their context; surviving caches are marked so their
// remaining keys stay alive.
void MarkCompactCollector::ProcessMapCaches() {
  Object* raw_context = heap()->global_contexts_list_;
  // Walk the linked list of global contexts.
  while (raw_context != heap()->undefined_value()) {
    Context* context = reinterpret_cast<Context*>(raw_context);
    if (context->IsMarked()) {
      HeapObject* raw_map_cache =
          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
      // A map cache may be reachable from the stack.  In this case
      // it's already transitively marked and it's too late to clean
      // up its parts.
      if (!raw_map_cache->IsMarked() &&
          raw_map_cache != heap()->undefined_value()) {
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
        int existing_elements = map_cache->NumberOfElements();
        int used_elements = 0;
        // Scan the (key, map) entry pairs, counting live ones and nulling
        // out the dead ones in place.
        for (int i = MapCache::kElementsStartIndex;
             i < map_cache->length();
             i += MapCache::kEntrySize) {
          Object* raw_key = map_cache->get(i);
          if (raw_key == heap()->undefined_value() ||
              raw_key == heap()->null_value()) continue;
          STATIC_ASSERT(MapCache::kEntrySize == 2);
          Object* raw_map = map_cache->get(i + 1);
          if (raw_map->IsHeapObject() &&
              HeapObject::cast(raw_map)->IsMarked()) {
            ++used_elements;
          } else {
            // Delete useless entries with unmarked maps.
            ASSERT(raw_map->IsMap());
            map_cache->set_null_unchecked(heap(), i);
            map_cache->set_null_unchecked(heap(), i + 1);
          }
        }
        if (used_elements == 0) {
          // Nothing survived; drop the whole cache from the context.
          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
        } else {
          // Note: we don't actually shrink the cache here to avoid
          // extra complexity during GC.  We rely on subsequent cache
          // usages (EnsureCapacity) to do this.
          map_cache->ElementsRemoved(existing_elements - used_elements);
          MarkObject(map_cache);
        }
      }
    }
    // Move to next element in the list.
    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
  }
  // Drain anything the MarkObject calls above made reachable.
  ProcessMarkingStack();
}
1626
1627
#ifdef DEBUG
// Accumulates per-space live-object size statistics (debug builds only).
// Exactly one space must contain the object.
void MarkCompactCollector::UpdateLiveObjectCount(HeapObject* obj) {
  // Query the size once; it is reused for both the global and the
  // per-space counters.
  int size = obj->Size();
  live_bytes_ += size;
  if (heap()->new_space()->Contains(obj)) {
    live_young_objects_size_ += size;
  } else if (heap()->map_space()->Contains(obj)) {
    ASSERT(obj->IsMap());
    live_map_objects_size_ += size;
  } else if (heap()->cell_space()->Contains(obj)) {
    ASSERT(obj->IsJSGlobalPropertyCell());
    live_cell_objects_size_ += size;
  } else if (heap()->old_pointer_space()->Contains(obj)) {
    live_old_pointer_objects_size_ += size;
  } else if (heap()->old_data_space()->Contains(obj)) {
    live_old_data_objects_size_ += size;
  } else if (heap()->code_space()->Contains(obj)) {
    live_code_objects_size_ += size;
  } else if (heap()->lo_space()->Contains(obj)) {
    live_lo_objects_size_ += size;
  } else {
    UNREACHABLE();
  }
}
#endif  // DEBUG
1652
1653
// Sweeps the large-object space by freeing every unmarked object.  Large
// objects are handled here, separately from the paged spaces.
void MarkCompactCollector::SweepLargeObjectSpace() {
#ifdef DEBUG
  // Advance the collector's debug state machine out of the marking phase.
  ASSERT(state_ == MARK_LIVE_OBJECTS);
  state_ =
      compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
#endif
  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap()->lo_space()->FreeUnmarkedObjects();
}
1663
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001664
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00001665// Safe to use during marking phase only.
1666bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1667 MapWord metamap = object->map_word();
1668 metamap.ClearMark();
1669 return metamap.ToMap()->instance_type() == MAP_TYPE;
1670}
1671
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001672
// Clears map transitions that lead from live maps to dead maps, compacts
// each map's prototype-transition cache, and restores the prototype fields
// that were temporarily repurposed as back pointers by CreateBackPointers.
void MarkCompactCollector::ClearNonLiveTransitions() {
  HeapObjectIterator map_iterator(heap()->map_space(), &SizeOfMarkedObject);
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions.  At the same time,
  // set all the prototype fields of maps back to their original value,
  // dropping the back pointers temporarily stored in the prototype field.
  // Setting the prototype field requires following the linked list of
  // back pointers, reversing them all at once.  This allows us to find
  // those maps with map transitions that need to be nulled, and only
  // scan the descriptor arrays of those maps, not all maps.
  // All of these actions are carried out only on maps of JSObjects
  // and related subtypes.
  for (HeapObject* obj = map_iterator.next();
       obj != NULL; obj = map_iterator.next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    // Skip dead ByteArrays on the map-space free list.
    if (!map->IsMarked() && map->IsByteArray()) continue;

    ASSERT(SafeIsMap(map));
    // Only JSObject and subtypes have map transitions and back pointers.
    STATIC_ASSERT(LAST_TYPE == LAST_CALLABLE_SPEC_OBJECT_TYPE);
    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;

    if (map->IsMarked() && map->attached_to_shared_function_info()) {
      // This map is used for inobject slack tracking and has been detached
      // from SharedFunctionInfo during the mark phase.
      // Since it survived the GC, reattach it now.
      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
    }

    // Clear dead prototype transitions.  Entries whose prototype and cached
    // map both survived are compacted towards the front of the array.
    int number_of_transitions = map->NumberOfProtoTransitions();
    if (number_of_transitions > 0) {
      FixedArray* prototype_transitions =
          map->unchecked_prototype_transitions();
      int new_number_of_transitions = 0;
      const int header = Map::kProtoTransitionHeaderSize;
      const int proto_offset =
          header + Map::kProtoTransitionPrototypeOffset;
      const int map_offset = header + Map::kProtoTransitionMapOffset;
      const int step = Map::kProtoTransitionElementsPerEntry;
      for (int i = 0; i < number_of_transitions; i++) {
        Object* prototype = prototype_transitions->get(proto_offset + i * step);
        Object* cached_map = prototype_transitions->get(map_offset + i * step);
        if (HeapObject::cast(prototype)->IsMarked() &&
            HeapObject::cast(cached_map)->IsMarked()) {
          // Only move the entry if compaction has opened a gap before it.
          if (new_number_of_transitions != i) {
            prototype_transitions->set_unchecked(
                heap_,
                proto_offset + new_number_of_transitions * step,
                prototype,
                UPDATE_WRITE_BARRIER);
            prototype_transitions->set_unchecked(
                heap_,
                map_offset + new_number_of_transitions * step,
                cached_map,
                SKIP_WRITE_BARRIER);
          }
          new_number_of_transitions++;
        }
      }

      // Fill slots that became free with undefined value.
      Object* undefined = heap()->raw_unchecked_undefined_value();
      for (int i = new_number_of_transitions * step;
           i < number_of_transitions * step;
           i++) {
        prototype_transitions->set_unchecked(heap_,
                                             header + i,
                                             undefined,
                                             SKIP_WRITE_BARRIER);
      }
      map->SetNumberOfProtoTransitions(new_number_of_transitions);
    }

    // Follow the chain of back pointers to find the prototype.
    Map* current = map;
    while (SafeIsMap(current)) {
      current = reinterpret_cast<Map*>(current->prototype());
      ASSERT(current->IsHeapObject());
    }
    Object* real_prototype = current;

    // Follow back pointers, setting them to prototype,
    // clearing map transitions when necessary.
    current = map;
    bool on_dead_path = !current->IsMarked();
    Object* next;
    while (SafeIsMap(current)) {
      next = current->prototype();
      // There should never be a dead map above a live map.
      ASSERT(on_dead_path || current->IsMarked());

      // A live map above a dead map indicates a dead transition.
      // This test will always be false on the first iteration.
      if (on_dead_path && current->IsMarked()) {
        on_dead_path = false;
        current->ClearNonLiveTransitions(heap(), real_prototype);
      }
      *HeapObject::RawField(current, Map::kPrototypeOffset) =
          real_prototype;
      current = reinterpret_cast<Map*>(next);
    }
  }
}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001777
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001778
1779void MarkCompactCollector::ProcessWeakMaps() {
1780 Object* weak_map_obj = encountered_weak_maps();
1781 while (weak_map_obj != Smi::FromInt(0)) {
1782 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1783 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1784 ObjectHashTable* table = weak_map->unchecked_table();
1785 for (int i = 0; i < table->Capacity(); i++) {
1786 if (HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1787 Object* value = table->get(table->EntryToValueIndex(i));
1788 StaticMarkingVisitor::MarkObjectByPointer(heap(), &value);
1789 table->set_unchecked(heap(),
1790 table->EntryToValueIndex(i),
1791 value,
1792 UPDATE_WRITE_BARRIER);
1793 }
1794 }
1795 weak_map_obj = weak_map->next();
1796 }
1797}
1798
1799
1800void MarkCompactCollector::ClearWeakMaps() {
1801 Object* weak_map_obj = encountered_weak_maps();
1802 while (weak_map_obj != Smi::FromInt(0)) {
1803 ASSERT(HeapObject::cast(weak_map_obj)->IsMarked());
1804 JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
1805 ObjectHashTable* table = weak_map->unchecked_table();
1806 for (int i = 0; i < table->Capacity(); i++) {
1807 if (!HeapObject::cast(table->KeyAt(i))->IsMarked()) {
1808 table->RemoveEntry(i, heap());
1809 }
1810 }
1811 weak_map_obj = weak_map->next();
1812 weak_map->set_next(Smi::FromInt(0));
1813 }
1814 set_encountered_weak_maps(Smi::FromInt(0));
1815}
1816
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001817// -------------------------------------------------------------------------
1818// Phase 2: Encode forwarding addresses.
1819// When compacting, forwarding addresses for objects in old space and map
1820// space are encoded in their map pointer word (along with an encoding of
1821// their map pointers).
1822//
// The exact encoding is described in the comments for class MapWord in
1824// objects.h.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001825//
1826// An address range [start, end) can have both live and non-live objects.
1827// Maximal non-live regions are marked so they can be skipped on subsequent
1828// sweeps of the heap. A distinguished map-pointer encoding is used to mark
1829// free regions of one-word size (in which case the next word is the start
1830// of a live object). A second distinguished map-pointer encoding is used
1831// to mark free regions larger than one word, and the size of the free
1832// region (including the first word) is written to the second word of the
1833// region.
1834//
1835// Any valid map page offset must lie in the object area of the page, so map
1836// page offsets less than Page::kObjectStartOffset are invalid. We use a
1837// pair of distinguished invalid map encodings (for single word and multiple
1838// words) to indicate free regions in the page found during computation of
1839// forwarding addresses and skipped over in subsequent sweeps.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001840
1841
1842// Encode a free region, defined by the given start address and size, in the
1843// first word or two of the region.
1844void EncodeFreeRegion(Address free_start, int free_size) {
1845 ASSERT(free_size >= kIntSize);
1846 if (free_size == kIntSize) {
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001847 Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001848 } else {
1849 ASSERT(free_size >= 2 * kIntSize);
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00001850 Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001851 Memory::int_at(free_start + kIntSize) = free_size;
1852 }
1853
1854#ifdef DEBUG
1855 // Zap the body of the free region.
1856 if (FLAG_enable_slow_asserts) {
1857 for (int offset = 2 * kIntSize;
1858 offset < free_size;
1859 offset += kPointerSize) {
1860 Memory::Address_at(free_start + offset) = kZapValue;
1861 }
1862 }
1863#endif
1864}
1865
1866
1867// Try to promote all objects in new space. Heap numbers and sequential
ager@chromium.org5aa501c2009-06-23 07:57:28 +00001868// strings are promoted to the code space, large objects to large object space,
1869// and all others to the old space.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001870inline MaybeObject* MCAllocateFromNewSpace(Heap* heap,
1871 HeapObject* object,
lrn@chromium.org303ada72010-10-27 09:33:13 +00001872 int object_size) {
1873 MaybeObject* forwarded;
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001874 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
ager@chromium.org5aa501c2009-06-23 07:57:28 +00001875 forwarded = Failure::Exception();
1876 } else {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001877 OldSpace* target_space = heap->TargetSpace(object);
1878 ASSERT(target_space == heap->old_pointer_space() ||
1879 target_space == heap->old_data_space());
ager@chromium.org5aa501c2009-06-23 07:57:28 +00001880 forwarded = target_space->MCAllocateRaw(object_size);
1881 }
lrn@chromium.org303ada72010-10-27 09:33:13 +00001882 Object* result;
1883 if (!forwarded->ToObject(&result)) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001884 result = heap->new_space()->MCAllocateRaw(object_size)->ToObjectUnchecked();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001885 }
lrn@chromium.org303ada72010-10-27 09:33:13 +00001886 return result;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001887}
1888
1889
1890// Allocation functions for the paged spaces call the space's MCAllocateRaw.
lrn@chromium.org303ada72010-10-27 09:33:13 +00001891MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldPointerSpace(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001892 Heap *heap,
lrn@chromium.org303ada72010-10-27 09:33:13 +00001893 HeapObject* ignore,
1894 int object_size) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001895 return heap->old_pointer_space()->MCAllocateRaw(object_size);
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001896}
1897
1898
lrn@chromium.org303ada72010-10-27 09:33:13 +00001899MUST_USE_RESULT inline MaybeObject* MCAllocateFromOldDataSpace(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001900 Heap* heap,
lrn@chromium.org303ada72010-10-27 09:33:13 +00001901 HeapObject* ignore,
1902 int object_size) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001903 return heap->old_data_space()->MCAllocateRaw(object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001904}
1905
1906
lrn@chromium.org303ada72010-10-27 09:33:13 +00001907MUST_USE_RESULT inline MaybeObject* MCAllocateFromCodeSpace(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001908 Heap* heap,
lrn@chromium.org303ada72010-10-27 09:33:13 +00001909 HeapObject* ignore,
1910 int object_size) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001911 return heap->code_space()->MCAllocateRaw(object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001912}
1913
1914
lrn@chromium.org303ada72010-10-27 09:33:13 +00001915MUST_USE_RESULT inline MaybeObject* MCAllocateFromMapSpace(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001916 Heap* heap,
lrn@chromium.org303ada72010-10-27 09:33:13 +00001917 HeapObject* ignore,
1918 int object_size) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001919 return heap->map_space()->MCAllocateRaw(object_size);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001920}
1921
1922
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001923MUST_USE_RESULT inline MaybeObject* MCAllocateFromCellSpace(
1924 Heap* heap, HeapObject* ignore, int object_size) {
1925 return heap->cell_space()->MCAllocateRaw(object_size);
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001926}
1927
1928
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001929// The forwarding address is encoded at the same offset as the current
1930// to-space object, but in from space.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001931inline void EncodeForwardingAddressInNewSpace(Heap* heap,
1932 HeapObject* old_object,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001933 int object_size,
1934 Object* new_object,
1935 int* ignored) {
1936 int offset =
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001937 heap->new_space()->ToSpaceOffsetForAddress(old_object->address());
1938 Memory::Address_at(heap->new_space()->FromSpaceLow() + offset) =
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001939 HeapObject::cast(new_object)->address();
1940}
1941
1942
1943// The forwarding address is encoded in the map pointer of the object as an
1944// offset (in terms of live bytes) from the address of the first live object
1945// in the page.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001946inline void EncodeForwardingAddressInPagedSpace(Heap* heap,
1947 HeapObject* old_object,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001948 int object_size,
1949 Object* new_object,
1950 int* offset) {
1951 // Record the forwarding address of the first live object if necessary.
1952 if (*offset == 0) {
1953 Page::FromAddress(old_object->address())->mc_first_forwarded =
1954 HeapObject::cast(new_object)->address();
1955 }
1956
kasper.lund7276f142008-07-30 08:49:36 +00001957 MapWord encoding =
1958 MapWord::EncodeAddress(old_object->map()->address(), *offset);
1959 old_object->set_map_word(encoding);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001960 *offset += object_size;
1961 ASSERT(*offset <= Page::kObjectAreaSize);
1962}
1963
1964
1965// Most non-live objects are ignored.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001966inline void IgnoreNonLiveObject(HeapObject* object, Isolate* isolate) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001967
1968
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001969// Function template that, given a range of addresses (eg, a semispace or a
1970// paged space page), iterates through the objects in the range to clear
1971// mark bits and compute and encode forwarding addresses. As a side effect,
1972// maximal free chunks are marked so that they can be skipped on subsequent
1973// sweeps.
1974//
1975// The template parameters are an allocation function, a forwarding address
1976// encoding function, and a function to process non-live objects.
template<MarkCompactCollector::AllocationFunction Alloc,
         MarkCompactCollector::EncodingFunction Encode,
         MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
inline void EncodeForwardingAddressesInRange(MarkCompactCollector* collector,
                                             Address start,
                                             Address end,
                                             int* offset) {
  // The start address of the current free region while sweeping the space.
  // This address is set when a transition from live to non-live objects is
  // encountered. A value (an encoding of the 'next free region' pointer)
  // is written to memory at this address when a transition from non-live to
  // live objects is encountered.
  Address free_start = NULL;

  // A flag giving the state of the previously swept object. Initially true
  // to ensure that free_start is initialized to a proper address before
  // trying to write to it.
  bool is_prev_alive = true;

  int object_size;  // Will be set on each iteration of the loop.
  for (Address current = start; current < end; current += object_size) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (object->IsMarked()) {
      // Live object: clear its mark bit (keeping the marked count in sync),
      // allocate its destination, and encode the forwarding address.
      object->ClearMark();
      collector->tracer()->decrement_marked_count();
      object_size = object->Size();

      Object* forwarded =
          Alloc(collector->heap(), object, object_size)->ToObjectUnchecked();
      Encode(collector->heap(), object, object_size, forwarded, offset);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("forward %p -> %p.\n", object->address(),
               HeapObject::cast(forwarded)->address());
      }
#endif
      if (!is_prev_alive) {  // Transition from non-live to live.
        // Close off the free region that ended just before this object.
        EncodeFreeRegion(free_start, static_cast<int>(current - free_start));
        is_prev_alive = true;
      }
    } else {  // Non-live object.
      object_size = object->Size();
      ProcessNonLive(object, collector->heap()->isolate());
      if (is_prev_alive) {  // Transition from live to non-live.
        free_start = current;
        is_prev_alive = false;
      }
      LiveObjectList::ProcessNonLive(object);
    }
  }

  // If we ended on a free region, mark it.
  if (!is_prev_alive) {
    EncodeFreeRegion(free_start, static_cast<int>(end - free_start));
  }
}
2034
2035
2036// Functions to encode the forwarding pointers in each compactable space.
2037void MarkCompactCollector::EncodeForwardingAddressesInNewSpace() {
2038 int ignored;
2039 EncodeForwardingAddressesInRange<MCAllocateFromNewSpace,
2040 EncodeForwardingAddressInNewSpace,
2041 IgnoreNonLiveObject>(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002042 this,
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002043 heap()->new_space()->bottom(),
2044 heap()->new_space()->top(),
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002045 &ignored);
2046}
2047
2048
2049template<MarkCompactCollector::AllocationFunction Alloc,
2050 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive>
2051void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
2052 PagedSpace* space) {
2053 PageIterator it(space, PageIterator::PAGES_IN_USE);
2054 while (it.has_next()) {
2055 Page* p = it.next();
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002056
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002057 // The offset of each live object in the page from the first live object
2058 // in the page.
2059 int offset = 0;
2060 EncodeForwardingAddressesInRange<Alloc,
2061 EncodeForwardingAddressInPagedSpace,
2062 ProcessNonLive>(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002063 this,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002064 p->ObjectAreaStart(),
2065 p->AllocationTop(),
2066 &offset);
2067 }
2068}
2069
2070
// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
// The first pass migrates all alive objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If the object is dead we
// write NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead objects during traversal of dirty regions; we
// should clear them to avoid encountering them again during the next dirty
// regions iteration.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002081static void MigrateObject(Heap* heap,
2082 Address dst,
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002083 Address src,
2084 int size,
2085 bool to_old_space) {
2086 if (to_old_space) {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002087 heap->CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002088 } else {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002089 heap->CopyBlock(dst, src, size);
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002090 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002091
2092 Memory::Address_at(src) = dst;
2093}
2094
2095
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00002096class StaticPointersToNewGenUpdatingVisitor : public
2097 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
2098 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002099 static inline void VisitPointer(Heap* heap, Object** p) {
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00002100 if (!(*p)->IsHeapObject()) return;
2101
2102 HeapObject* obj = HeapObject::cast(*p);
2103 Address old_addr = obj->address();
2104
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002105 if (heap->new_space()->Contains(obj)) {
2106 ASSERT(heap->InFromSpace(*p));
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00002107 *p = HeapObject::FromAddress(Memory::Address_at(old_addr));
2108 }
2109 }
2110};
2111
2112
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002113// Visitor for updating pointers from live objects in old spaces to new space.
2114// It does not expect to encounter pointers to dead objects.
// ObjectVisitor adapter around StaticPointersToNewGenUpdatingVisitor, used
// for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
 public:
  explicit PointersToNewGenUpdatingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) {
      StaticPointersToNewGenUpdatingVisitor::VisitPointer(heap_, p);
    }
  }

  // Code targets are stored as addresses in relocation info: decode to a
  // Code object, update it, and write the (possibly moved) entry point back.
  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(Code::cast(target)->instruction_start());
  }

  // Same as above for patched debug-break targets (JS returns and debug
  // break slots).
  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }

 private:
  Heap* heap_;
};
2149
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002150
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002151// Visitor for updating pointers from live objects in old spaces to new space.
2152// It can encounter pointers to dead objects in new space when traversing map
2153// space (see comment for MigrateObject).
2154static void UpdatePointerToNewGen(HeapObject** p) {
2155 if (!(*p)->IsHeapObject()) return;
2156
2157 Address old_addr = (*p)->address();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002158 ASSERT(HEAP->InFromSpace(*p));
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002159
2160 Address new_addr = Memory::Address_at(old_addr);
2161
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002162 if (new_addr == NULL) {
2163 // We encountered pointer to a dead object. Clear it so we will
2164 // not visit it again during next iteration of dirty regions.
2165 *p = NULL;
2166 } else {
2167 *p = HeapObject::FromAddress(new_addr);
2168 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002169}
2170
2171
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002172static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
2173 Object** p) {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002174 Address old_addr = HeapObject::cast(*p)->address();
2175 Address new_addr = Memory::Address_at(old_addr);
2176 return String::cast(HeapObject::FromAddress(new_addr));
2177}
2178
2179
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002180static bool TryPromoteObject(Heap* heap, HeapObject* object, int object_size) {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002181 Object* result;
2182
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002183 if (object_size > heap->MaxObjectSizeInPagedSpace()) {
lrn@chromium.org303ada72010-10-27 09:33:13 +00002184 MaybeObject* maybe_result =
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002185 heap->lo_space()->AllocateRawFixedArray(object_size);
lrn@chromium.org303ada72010-10-27 09:33:13 +00002186 if (maybe_result->ToObject(&result)) {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002187 HeapObject* target = HeapObject::cast(result);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002188 MigrateObject(heap, target->address(), object->address(), object_size,
2189 true);
2190 heap->mark_compact_collector()->tracer()->
erik.corry@gmail.com9dfbea42010-05-21 12:58:28 +00002191 increment_promoted_objects_size(object_size);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002192 return true;
2193 }
2194 } else {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002195 OldSpace* target_space = heap->TargetSpace(object);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002196
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002197 ASSERT(target_space == heap->old_pointer_space() ||
2198 target_space == heap->old_data_space());
lrn@chromium.org303ada72010-10-27 09:33:13 +00002199 MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
2200 if (maybe_result->ToObject(&result)) {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002201 HeapObject* target = HeapObject::cast(result);
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002202 MigrateObject(heap,
2203 target->address(),
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002204 object->address(),
2205 object_size,
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002206 target_space == heap->old_pointer_space());
2207 heap->mark_compact_collector()->tracer()->
erik.corry@gmail.com9dfbea42010-05-21 12:58:28 +00002208 increment_promoted_objects_size(object_size);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002209 return true;
2210 }
2211 }
2212
2213 return false;
2214}
2215
2216
// Scavenge new space as part of sweeping (see the two-pass description
// above): pass one migrates or promotes live objects and leaves forwarding
// addresses (NULL for dead objects); pass two rewrites every pointer into
// new space -- in to space, roots, dirty old-space regions, cells, the
// global contexts list, and the external string table.
static void SweepNewSpace(Heap* heap, NewSpace* space) {
  heap->CheckNewSpaceExpansionCriteria();

  Address from_bottom = space->bottom();
  Address from_top = space->top();

  // Flip the semispaces.  After flipping, to space is empty, from space has
  // live objects.
  space->Flip();
  space->ResetAllocationInfo();

  int size = 0;
  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses.
  for (Address current = from_bottom; current < from_top; current += size) {
    HeapObject* object = HeapObject::FromAddress(current);

    if (object->IsMarked()) {
      object->ClearMark();
      heap->mark_compact_collector()->tracer()->decrement_marked_count();

      size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(heap, object, size)) {
        continue;
      }

      // Promotion failed. Just migrate object to another semispace.
      // Allocation cannot fail at this point: semispaces are of equal size.
      Object* target = space->AllocateRaw(size)->ToObjectUnchecked();

      MigrateObject(heap,
                    HeapObject::cast(target)->address(),
                    current,
                    size,
                    false);
    } else {
      // Process the dead object before we write a NULL into its header.
      LiveObjectList::ProcessNonLive(object);

      size = object->Size();
      // NULL forwarding address marks the object as dead for pass two.
      Memory::Address_at(current) = NULL;
    }
  }

  // Second pass: find pointers to new space and update them.
  PointersToNewGenUpdatingVisitor updating_visitor(heap);

  // Update pointers in to space.
  Address current = space->bottom();
  while (current < space->top()) {
    HeapObject* object = HeapObject::FromAddress(current);
    current +=
        StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
                                                           object);
  }

  // Update roots.
  heap->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  LiveObjectList::IterateElements(&updating_visitor);

  // Update pointers in old spaces.
  heap->IterateDirtyRegions(heap->old_pointer_space(),
                            &Heap::IteratePointersInDirtyRegion,
                            &UpdatePointerToNewGen,
                            heap->WATERMARK_SHOULD_BE_VALID);

  heap->lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(heap->cell_space());
  for (HeapObject* cell = cell_iterator.next();
       cell != NULL;
       cell = cell_iterator.next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      // Visit the cell's value slot directly; cells are not iterated by the
      // dirty-region machinery above.
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update pointer from the global contexts list.
  updating_visitor.VisitPointer(heap->global_contexts_list_address());

  // Update pointers from external string table.
  heap->UpdateNewSpaceReferencesInExternalStringTable(
      &UpdateNewSpaceReferenceInExternalStringTableEntry);

  // All pointers were updated. Update auxiliary allocation info.
  heap->IncrementYoungSurvivorsCounter(survivors_size);
  space->set_age_mark(space->top());

  // Update JSFunction pointers from the runtime profiler.
  heap->isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
}
2317
2318
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002319static void SweepSpace(Heap* heap, PagedSpace* space) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002320 PageIterator it(space, PageIterator::PAGES_IN_USE);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002321
2322 // During sweeping of paged space we are trying to find longest sequences
2323 // of pages without live objects and free them (instead of putting them on
2324 // the free list).
2325
2326 // Page preceding current.
2327 Page* prev = Page::FromAddress(NULL);
2328
2329 // First empty page in a sequence.
2330 Page* first_empty_page = Page::FromAddress(NULL);
2331
2332 // Page preceding first empty page.
2333 Page* prec_first_empty_page = Page::FromAddress(NULL);
2334
2335 // If last used page of space ends with a sequence of dead objects
  // we can adjust allocation top instead of putting this free area into
2337 // the free list. Thus during sweeping we keep track of such areas
2338 // and defer their deallocation until the sweeping of the next page
2339 // is done: if one of the next pages contains live objects we have
2340 // to put such area into the free list.
2341 Address last_free_start = NULL;
2342 int last_free_size = 0;
2343
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002344 while (it.has_next()) {
2345 Page* p = it.next();
2346
2347 bool is_previous_alive = true;
2348 Address free_start = NULL;
2349 HeapObject* object;
2350
2351 for (Address current = p->ObjectAreaStart();
2352 current < p->AllocationTop();
2353 current += object->Size()) {
2354 object = HeapObject::FromAddress(current);
kasper.lund7276f142008-07-30 08:49:36 +00002355 if (object->IsMarked()) {
2356 object->ClearMark();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002357 heap->mark_compact_collector()->tracer()->decrement_marked_count();
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002358
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002359 if (!is_previous_alive) { // Transition from free to live.
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002360 space->DeallocateBlock(free_start,
2361 static_cast<int>(current - free_start),
2362 true);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002363 is_previous_alive = true;
2364 }
2365 } else {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002366 heap->mark_compact_collector()->ReportDeleteIfNeeded(
2367 object, heap->isolate());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002368 if (is_previous_alive) { // Transition from live to free.
2369 free_start = current;
2370 is_previous_alive = false;
2371 }
ager@chromium.org0ee099b2011-01-25 14:06:47 +00002372 LiveObjectList::ProcessNonLive(object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002373 }
2374 // The object is now unmarked for the call to Size() at the top of the
2375 // loop.
2376 }
2377
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002378 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop())
2379 || (!is_previous_alive && free_start == p->ObjectAreaStart());
2380
2381 if (page_is_empty) {
2382 // This page is empty. Check whether we are in the middle of
2383 // sequence of empty pages and start one if not.
2384 if (!first_empty_page->is_valid()) {
2385 first_empty_page = p;
2386 prec_first_empty_page = prev;
2387 }
2388
2389 if (!is_previous_alive) {
2390 // There are dead objects on this page. Update space accounting stats
2391 // without putting anything into free list.
2392 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
2393 if (size_in_bytes > 0) {
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002394 space->DeallocateBlock(free_start, size_in_bytes, false);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002395 }
2396 }
2397 } else {
2398 // This page is not empty. Sequence of empty pages ended on the previous
2399 // one.
2400 if (first_empty_page->is_valid()) {
2401 space->FreePages(prec_first_empty_page, prev);
2402 prec_first_empty_page = first_empty_page = Page::FromAddress(NULL);
2403 }
2404
      // If there is a free ending area on one of the previous pages we have
      // to deallocate that area and put it on the free list.
2407 if (last_free_size > 0) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002408 Page::FromAddress(last_free_start)->
2409 SetAllocationWatermark(last_free_start);
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002410 space->DeallocateBlock(last_free_start, last_free_size, true);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002411 last_free_start = NULL;
2412 last_free_size = 0;
2413 }
2414
2415 // If the last region of this page was not live we remember it.
2416 if (!is_previous_alive) {
2417 ASSERT(last_free_size == 0);
2418 last_free_size = static_cast<int>(p->AllocationTop() - free_start);
2419 last_free_start = free_start;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002420 }
2421 }
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002422
2423 prev = p;
2424 }
2425
2426 // We reached end of space. See if we need to adjust allocation top.
2427 Address new_allocation_top = NULL;
2428
2429 if (first_empty_page->is_valid()) {
2430 // Last used pages in space are empty. We can move allocation top backwards
2431 // to the beginning of first empty page.
2432 ASSERT(prev == space->AllocationTopPage());
2433
2434 new_allocation_top = first_empty_page->ObjectAreaStart();
2435 }
2436
2437 if (last_free_size > 0) {
2438 // There was a free ending area on the previous page.
2439 // Deallocate it without putting it into freelist and move allocation
2440 // top to the beginning of this free area.
ricow@chromium.orgd236f4d2010-09-01 06:52:08 +00002441 space->DeallocateBlock(last_free_start, last_free_size, false);
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00002442 new_allocation_top = last_free_start;
2443 }
2444
2445 if (new_allocation_top != NULL) {
2446#ifdef DEBUG
2447 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top);
2448 if (!first_empty_page->is_valid()) {
2449 ASSERT(new_allocation_top_page == space->AllocationTopPage());
2450 } else if (last_free_size > 0) {
2451 ASSERT(new_allocation_top_page == prec_first_empty_page);
2452 } else {
2453 ASSERT(new_allocation_top_page == first_empty_page);
2454 }
2455#endif
2456
2457 space->SetTop(new_allocation_top);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002458 }
2459}
2460
2461
// Second phase of a compacting collection: compute a forwarding address
// for every live object and record ("encode") it; objects are actually
// moved only in the later relocation phase (see the ENCODE/RELOCATE state
// checks elsewhere in this file).  The order of the spaces below matters.
void MarkCompactCollector::EncodeForwardingAddresses() {
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  // Objects in the active semispace of the young generation may be
  // relocated to the inactive semispace (if not promoted).  Set the
  // relocation info to the beginning of the inactive semispace.
  heap()->new_space()->MCResetRelocationInfo();

  // Compute the forwarding pointers in each space.  The allocation
  // function and the non-live-object handler are template parameters.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldPointerSpace,
                                        ReportDeleteIfNeeded>(
      heap()->old_pointer_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromOldDataSpace,
                                        IgnoreNonLiveObject>(
      heap()->old_data_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCodeSpace,
                                        ReportDeleteIfNeeded>(
      heap()->code_space());

  EncodeForwardingAddressesInPagedSpace<MCAllocateFromCellSpace,
                                        IgnoreNonLiveObject>(
      heap()->cell_space());


  // Compute new space next to last after the old and code spaces have been
  // compacted.  Objects in new space can be promoted to old or code space.
  EncodeForwardingAddressesInNewSpace();

  // Compute map space last because computing forwarding addresses
  // overwrites non-live objects.  Objects in the other spaces rely on
  // non-live map pointers to get the sizes of non-live objects.
  EncodeForwardingAddressesInPagedSpace<MCAllocateFromMapSpace,
                                        IgnoreNonLiveObject>(
      heap()->map_space());

  // Write relocation info to the top page, so we can use it later.  This is
  // done after promoting objects from the new space so we get the correct
  // allocation top.
  heap()->old_pointer_space()->MCWriteRelocationInfoToPage();
  heap()->old_data_space()->MCWriteRelocationInfoToPage();
  heap()->code_space()->MCWriteRelocationInfoToPage();
  heap()->map_space()->MCWriteRelocationInfoToPage();
  heap()->cell_space()->MCWriteRelocationInfoToPage();
}
2507
2508
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002509class MapIterator : public HeapObjectIterator {
2510 public:
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002511 explicit MapIterator(Heap* heap)
2512 : HeapObjectIterator(heap->map_space(), &SizeCallback) { }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002513
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002514 MapIterator(Heap* heap, Address start)
2515 : HeapObjectIterator(heap->map_space(), start, &SizeCallback) { }
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002516
2517 private:
2518 static int SizeCallback(HeapObject* unused) {
2519 USE(unused);
2520 return Map::kSize;
2521 }
2522};
2523
2524
// Compacts the map space in place: live maps located at or above
// to_evacuate_start_ are copied into vacant (free-list) slots below it,
// and an overflow-tagged forwarding map word is left in each evacuated
// map.  Afterwards every map pointer in the heap is rewritten to the new
// location via the UpdateMapPointersIn* methods, and Finish() commits the
// new map-space top.
class MapCompact {
 public:
  // live_maps is the number of live maps in the map space;
  // to_evacuate_start_ is where the space will end after compaction.
  explicit MapCompact(Heap* heap, int live_maps)
    : heap_(heap),
      live_maps_(live_maps),
      to_evacuate_start_(heap->map_space()->TopAfterCompaction(live_maps)),
      vacant_map_it_(heap),
      map_to_evacuate_it_(heap, to_evacuate_start_),
      first_map_to_evacuate_(
          reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
  }

  // Pair each map above to_evacuate_start_ with a vacant slot below it
  // and move it there, leaving a forwarding map word behind.
  void CompactMaps() {
    // As we know the number of maps to evacuate beforehand,
    // we stop when there are no more vacant maps.
    for (Map* next_vacant_map = NextVacantMap();
         next_vacant_map;
         next_vacant_map = NextVacantMap()) {
      EvacuateMap(next_vacant_map, NextMapToEvacuate());
    }

#ifdef DEBUG
    CheckNoMapsToEvacuate();
#endif
  }

  // Rewrite map pointers reachable from the strong roots, the weak
  // global handles and the LiveObjectList.
  void UpdateMapPointersInRoots() {
    MapUpdatingVisitor map_updating_visitor;
    heap()->IterateRoots(&map_updating_visitor, VISIT_ONLY_STRONG);
    heap()->isolate()->global_handles()->IterateWeakRoots(
        &map_updating_visitor);
    LiveObjectList::IterateElements(&map_updating_visitor);
  }

  // Rewrite map pointers in all objects of a paged space.  Must not be
  // called on the map space itself (its objects are handled by the
  // evacuation above).
  void UpdateMapPointersInPagedSpace(PagedSpace* space) {
    ASSERT(space != heap()->map_space());

    PageIterator it(space, PageIterator::PAGES_IN_USE);
    while (it.has_next()) {
      Page* p = it.next();
      UpdateMapPointersInRange(heap(),
                               p->ObjectAreaStart(),
                               p->AllocationTop());
    }
  }

  // Rewrite map pointers in all objects of the new space.
  void UpdateMapPointersInNewSpace() {
    NewSpace* space = heap()->new_space();
    UpdateMapPointersInRange(heap(), space->bottom(), space->top());
  }

  // Rewrite map pointers in all large objects.
  void UpdateMapPointersInLargeObjectSpace() {
    LargeObjectIterator it(heap()->lo_space());
    for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
      UpdateMapPointersInObject(heap(), obj);
  }

  // Commit the compaction: shrink the map space to the compacted size.
  void Finish() {
    heap()->map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
  }

  inline Heap* heap() const { return heap_; }

 private:
  Heap* heap_;
  int live_maps_;
  Address to_evacuate_start_;   // Map-space top after compaction.
  MapIterator vacant_map_it_;   // Scans free slots below to_evacuate_start_.
  MapIterator map_to_evacuate_it_;  // Scans live maps above it.
  Map* first_map_to_evacuate_;

  // Helper class for updating map pointers in HeapObjects: replaces any
  // pointer to an evacuated map (recognizable by its overflowed map word)
  // with the map's new location.
  class MapUpdatingVisitor: public ObjectVisitor {
   public:
    MapUpdatingVisitor() {}

    void VisitPointer(Object** p) {
      UpdateMapPointer(p);
    }

    void VisitPointers(Object** start, Object** end) {
      for (Object** p = start; p < end; p++) UpdateMapPointer(p);
    }

   private:
    void UpdateMapPointer(Object** p) {
      if (!(*p)->IsHeapObject()) return;
      HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);

      // Moved maps are tagged with overflowed map word.  They are the only
      // objects whose map word is overflowed, as marking is already
      // complete.
      MapWord map_word = old_map->map_word();
      if (!map_word.IsOverflowed()) return;

      *p = GetForwardedMap(map_word);
    }
  };

  // Advance the iterator to the next object of the requested kind
  // (live == true: a Map; live == false: a free-list node).  Returns
  // NULL when the sentinel object `last` is reached.
  static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
    while (true) {
      HeapObject* next = it->next();
      ASSERT(next != NULL);
      if (next == last)
        return NULL;
      ASSERT(!next->IsOverflowed());
      ASSERT(!next->IsMarked());
      ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
      if (next->IsMap() == live)
        return reinterpret_cast<Map*>(next);
    }
  }

  // Next free slot below to_evacuate_start_, or NULL when none remain.
  Map* NextVacantMap() {
    Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
    ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
    return map;
  }

  // Next live map at or above to_evacuate_start_; never NULL because a
  // vacant slot below implies a live map above.
  Map* NextMapToEvacuate() {
    Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
    ASSERT(map != NULL);
    ASSERT(map->IsMap());
    return map;
  }

  // Copy map_to_evacuate into the vacant slot and store an
  // overflow-tagged forwarding map word in the old location.
  static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
    ASSERT(FreeListNode::IsFreeListNode(vacant_map));
    ASSERT(map_to_evacuate->IsMap());

    ASSERT(Map::kSize % 4 == 0);

    map_to_evacuate->heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(
        vacant_map->address(), map_to_evacuate->address(), Map::kSize);

    ASSERT(vacant_map->IsMap());  // Due to memcpy above.

    MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
    forwarding_map_word.SetOverflow();
    map_to_evacuate->set_map_word(forwarding_map_word);

    ASSERT(map_to_evacuate->map_word().IsOverflowed());
    ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
  }

  // Decode a forwarding map word (overflow bit set) back into the map's
  // new location.
  static Map* GetForwardedMap(MapWord map_word) {
    ASSERT(map_word.IsOverflowed());
    map_word.ClearOverflow();
    Map* new_map = map_word.ToMap();
    ASSERT_MAP_ALIGNED(new_map->address());
    return new_map;
  }

  // Update the object's own map pointer (if its map was evacuated) and
  // all map pointers in the object's body.  Returns the object's size.
  static int UpdateMapPointersInObject(Heap* heap, HeapObject* obj) {
    ASSERT(!obj->IsMarked());
    Map* map = obj->map();
    ASSERT(heap->map_space()->Contains(map));
    MapWord map_word = map->map_word();
    ASSERT(!map_word.IsMarked());
    if (map_word.IsOverflowed()) {
      Map* new_map = GetForwardedMap(map_word);
      ASSERT(heap->map_space()->Contains(new_map));
      obj->set_map(new_map);

#ifdef DEBUG
      if (FLAG_gc_verbose) {
        PrintF("update %p : %p -> %p\n",
               obj->address(),
               reinterpret_cast<void*>(map),
               reinterpret_cast<void*>(new_map));
      }
#endif
    }

    // Size is computed from the pre-update map, which still describes
    // the object's layout.
    int size = obj->SizeFromMap(map);
    MapUpdatingVisitor map_updating_visitor;
    obj->IterateBody(map->instance_type(), size, &map_updating_visitor);
    return size;
  }

  // Walk every object in [start, end) and update its map pointers.
  static void UpdateMapPointersInRange(Heap* heap, Address start, Address end) {
    HeapObject* object;
    int size;
    for (Address current = start; current < end; current += size) {
      object = HeapObject::FromAddress(current);
      size = UpdateMapPointersInObject(heap, object);
      ASSERT(size > 0);
    }
  }

#ifdef DEBUG
  // Slow check: everything above to_evacuate_start_ is now free.
  void CheckNoMapsToEvacuate() {
    if (!FLAG_enable_slow_asserts)
      return;

    for (HeapObject* obj = map_to_evacuate_it_.next();
         obj != NULL; obj = map_to_evacuate_it_.next())
      ASSERT(FreeListNode::IsFreeListNode(obj));
  }
#endif
};
2725
fschneider@chromium.org0c20e672010-01-14 15:28:53 +00002726
// Sweep phase of a non-compacting collection.  Sweeps every space, and
// additionally compacts the map space in place (via MapCompact) when it
// has become sparse enough to warrant it.
void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);

  ASSERT(state_ == SWEEP_SPACES);
  ASSERT(!IsCompacting());
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SweepSpace(heap(), heap()->old_pointer_space());
  SweepSpace(heap(), heap()->old_data_space());
  SweepSpace(heap(), heap()->code_space());
  SweepSpace(heap(), heap()->cell_space());
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    SweepNewSpace(heap(), heap()->new_space());
  }
  SweepSpace(heap(), heap()->map_space());

  // Fix up new-space pointers held in dirty regions of the map space.
  // NOTE(review): presumably required because SweepNewSpace above moves
  // surviving young objects -- confirm against SweepNewSpace.
  heap()->IterateDirtyRegions(heap()->map_space(),
                              &heap()->IteratePointersInDirtyMapsRegion,
                              &UpdatePointerToNewGen,
                              heap()->WATERMARK_SHOULD_BE_VALID);

  // After sweeping, the map space contains only live maps, each exactly
  // Map::kSize bytes.
  intptr_t live_maps_size = heap()->map_space()->Size();
  int live_maps = static_cast<int>(live_maps_size / Map::kSize);
  ASSERT(live_map_objects_size_ == live_maps_size);

  if (heap()->map_space()->NeedsCompaction(live_maps)) {
    MapCompact map_compact(heap(), live_maps);

    map_compact.CompactMaps();
    map_compact.UpdateMapPointersInRoots();

    // Rewrite map pointers in every space except the map space itself.
    PagedSpaces spaces;
    for (PagedSpace* space = spaces.next();
         space != NULL; space = spaces.next()) {
      if (space == heap()->map_space()) continue;
      map_compact.UpdateMapPointersInPagedSpace(space);
    }
    map_compact.UpdateMapPointersInNewSpace();
    map_compact.UpdateMapPointersInLargeObjectSpace();

    map_compact.Finish();
  }
}
2773
2774
// Iterate the live objects in a range of addresses (eg, a page or a
// semispace).  Free regions within the range are encoded in place: a
// kSingleFreeEncoding marker denotes a single free pointer-sized cell,
// and a kMultiFreeEncoding marker denotes a free block whose byte length
// is stored just past the marker.  The callback function is used to get
// the size of each live object.
2780int MarkCompactCollector::IterateLiveObjectsInRange(
2781 Address start,
2782 Address end,
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002783 LiveObjectCallback size_func) {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002784 int live_objects_size = 0;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002785 Address current = start;
2786 while (current < end) {
2787 uint32_t encoded_map = Memory::uint32_at(current);
2788 if (encoded_map == kSingleFreeEncoding) {
2789 current += kPointerSize;
2790 } else if (encoded_map == kMultiFreeEncoding) {
2791 current += Memory::int_at(current + kIntSize);
2792 } else {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002793 int size = (this->*size_func)(HeapObject::FromAddress(current));
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002794 current += size;
2795 live_objects_size += size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002796 }
2797 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002798 return live_objects_size;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002799}
2800
2801
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002802int MarkCompactCollector::IterateLiveObjects(
2803 NewSpace* space, LiveObjectCallback size_f) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002804 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2805 return IterateLiveObjectsInRange(space->bottom(), space->top(), size_f);
2806}
2807
2808
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002809int MarkCompactCollector::IterateLiveObjects(
2810 PagedSpace* space, LiveObjectCallback size_f) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002811 ASSERT(MARK_LIVE_OBJECTS < state_ && state_ <= RELOCATE_OBJECTS);
2812 int total = 0;
2813 PageIterator it(space, PageIterator::PAGES_IN_USE);
2814 while (it.has_next()) {
2815 Page* p = it.next();
2816 total += IterateLiveObjectsInRange(p->ObjectAreaStart(),
2817 p->AllocationTop(),
2818 size_f);
2819 }
2820 return total;
2821}
2822
2823
ager@chromium.orgddb913d2009-01-27 10:01:48 +00002824// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002825// Phase 3: Update pointers
2826
2827// Helper class for updating pointers in HeapObjects.
// Helper class for updating pointers in HeapObjects: rewrites each
// visited pointer to the target object's forwarding address computed in
// the encoding phase.  Large objects do not move and are left untouched.
class UpdatingVisitor: public ObjectVisitor {
 public:
  explicit UpdatingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    // Update all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  // Update a code-target relocation entry: forward the target Code
  // object, then re-derive the raw instruction-start address.
  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VisitPointer(&target);
    rinfo->set_target_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  // Same as VisitCodeTarget, for patched debug-break call sites.
  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(
        reinterpret_cast<Code*>(target)->instruction_start());
  }

  inline Heap* heap() const { return heap_; }

 private:
  // Replace *p with the forwarding address of the object it points to.
  // Smis and large objects are left unchanged.
  void UpdatePointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    HeapObject* obj = HeapObject::cast(*p);
    Address old_addr = obj->address();
    Address new_addr;
    ASSERT(!heap()->InFromSpace(obj));

    if (heap()->new_space()->Contains(obj)) {
      // A new-space object's forwarding address is stored in the from
      // space, at the same offset the object occupies in the to space.
      Address forwarding_pointer_addr =
          heap()->new_space()->FromSpaceLow() +
          heap()->new_space()->ToSpaceOffsetForAddress(old_addr);
      new_addr = Memory::Address_at(forwarding_pointer_addr);

#ifdef DEBUG
      ASSERT(heap()->old_pointer_space()->Contains(new_addr) ||
             heap()->old_data_space()->Contains(new_addr) ||
             heap()->new_space()->FromSpaceContains(new_addr) ||
             heap()->lo_space()->Contains(HeapObject::FromAddress(new_addr)));

      if (heap()->new_space()->FromSpaceContains(new_addr)) {
        ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
               heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
      }
#endif

    } else if (heap()->lo_space()->Contains(obj)) {
      // Don't move objects in the large object space.
      return;

    } else {
      // Paged-space object: the forwarding address is encoded in the
      // object's map word plus per-page relocation info.
#ifdef DEBUG
      PagedSpaces spaces;
      PagedSpace* original_space = spaces.next();
      while (original_space != NULL) {
        if (original_space->Contains(obj)) break;
        original_space = spaces.next();
      }
      ASSERT(original_space != NULL);
#endif
      new_addr = MarkCompactCollector::GetForwardingAddressInOldSpace(obj);
      ASSERT(original_space->Contains(new_addr));
      ASSERT(original_space->MCSpaceOffsetForAddress(new_addr) <=
             original_space->MCSpaceOffsetForAddress(old_addr));
    }

    *p = HeapObject::FromAddress(new_addr);

#ifdef DEBUG
    if (FLAG_gc_verbose) {
      PrintF("update %p : %p -> %p\n",
             reinterpret_cast<Address>(p), old_addr, new_addr);
    }
#endif
  }

  Heap* heap_;
};
2921
mads.s.agercbaa0602008-08-14 13:41:48 +00002922
// Third phase of a compacting collection: rewrite every pointer in the
// heap (roots, weak handles, and the bodies of all live objects) to the
// forwarding addresses computed during the encoding phase.
void MarkCompactCollector::UpdatePointers() {
#ifdef DEBUG
  ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
  state_ = UPDATE_POINTERS;
#endif
  UpdatingVisitor updating_visitor(heap());
  heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
      &updating_visitor);
  heap()->IterateRoots(&updating_visitor, VISIT_ONLY_STRONG);
  heap()->isolate()->global_handles()->IterateWeakRoots(&updating_visitor);

  // Update the pointer to the head of the weak list of global contexts.
  updating_visitor.VisitPointer(&heap()->global_contexts_list_);

  LiveObjectList::IterateElements(&updating_visitor);

  // Walk the live objects of each space, updating the pointers inside
  // them.  Old-space objects have encoded map words; new-space objects
  // still hold plain map pointers.
  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(),
      &MarkCompactCollector::UpdatePointersInOldObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::UpdatePointersInOldObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::UpdatePointersInNewObject);

  // Large objects do not move, the map word can be updated directly.
  LargeObjectIterator it(heap()->lo_space());
  for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
    UpdatePointersInNewObject(obj);
  }

  // Cross-check the visited live sizes against the totals recorded
  // during marking (debug builds only; USE() silences release warnings).
  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);
}
2973
2974
// Update the pointers inside an object whose map word still holds a
// direct map pointer -- used for new-space objects and (despite the
// name) large objects; see the call sites in UpdatePointers().
// Returns the object's size in bytes.
int MarkCompactCollector::UpdatePointersInNewObject(HeapObject* obj) {
  // Keep old map pointers
  Map* old_map = obj->map();
  ASSERT(old_map->IsHeapObject());

  // Where the map will live after the map space is relocated.
  Address forwarded = GetForwardingAddressInOldSpace(old_map);

  ASSERT(heap()->map_space()->Contains(old_map));
  ASSERT(heap()->map_space()->Contains(forwarded));
#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(), old_map->address(),
           forwarded);
  }
#endif
  // Update the map pointer.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(forwarded)));

  // We have to compute the object size relying on the old map because
  // map objects are not relocated yet.
  int obj_size = obj->SizeFromMap(old_map);

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(old_map->instance_type(), obj_size, &updating_visitor);
  return obj_size;
}
3002
3003
// Update the pointers inside a paged-space object whose map word was
// encoded during the forwarding phase as a (map address, offset) pair.
// The map pointer is re-encoded with the map's forwarded address so the
// offset survives for the relocation phase.  Returns the object's size.
int MarkCompactCollector::UpdatePointersInOldObject(HeapObject* obj) {
  // Decode the map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // At this point, the first word of map_addr is also encoded, cannot
  // cast it to Map* using Map::cast.
  Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr));
  int obj_size = obj->SizeFromMap(map);
  InstanceType type = map->instance_type();

  // Update map pointer, preserving the encoded offset.
  Address new_map_addr = GetForwardingAddressInOldSpace(map);
  int offset = encoding.DecodeOffset();
  obj->set_map_word(MapWord::EncodeAddress(new_map_addr, offset));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("update %p : %p -> %p\n", obj->address(),
           map_addr, new_map_addr);
  }
#endif

  // Update pointers in the object body.
  UpdatingVisitor updating_visitor(heap());
  obj->IterateBody(type, obj_size, &updating_visitor);
  return obj_size;
}
3033
3034
// Computes the post-compaction address of a live object in a paged space.
// The object's map word encodes an offset relative to the forwarding
// address of the first live object on its page (p->mc_first_forwarded);
// this function adds that offset and, if the result would run past the
// allocation watermark of the destination page, carries over into the
// following page.
Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
  // Object should be in either old or map space.
  MapWord encoding = obj->map_word();

  // Offset to the first live object's forwarding address.
  int offset = encoding.DecodeOffset();
  Address obj_addr = obj->address();

  // Find the first live object's forwarding address.
  Page* p = Page::FromAddress(obj_addr);
  Address first_forwarded = p->mc_first_forwarded;

  // Page start address of forwarded address.
  Page* forwarded_page = Page::FromAddress(first_forwarded);
  int forwarded_offset = forwarded_page->Offset(first_forwarded);

  // Find end of allocation in the page of first_forwarded.
  int mc_top_offset = forwarded_page->AllocationWatermarkOffset();

  // Check if current object's forward pointer is in the same page
  // as the first live object's forwarding pointer
  if (forwarded_offset + offset < mc_top_offset) {
    // In the same page.
    return first_forwarded + offset;
  }

  // Must be in the next page, NOTE: this may cross chunks.
  Page* next_page = forwarded_page->next_page();
  ASSERT(next_page->is_valid());

  // Rebase the remaining offset onto the next page's object area.
  offset -= (mc_top_offset - forwarded_offset);
  offset += Page::kObjectStartOffset;

  ASSERT_PAGE_OFFSET(offset);
  ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());

  return next_page->OffsetToAddress(offset);
}
3073
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003074
ager@chromium.orgddb913d2009-01-27 10:01:48 +00003075// -------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003076// Phase 4: Relocate objects
3077
// Phase 4 driver: moves every live object to its forwarding address.
// Map space is processed first because computing the size of objects in
// the other spaces requires their (already relocated) maps. Afterwards
// the semispaces are flipped, relocation info is committed to all paged
// spaces, and new-space survivor statistics are updated.
void MarkCompactCollector::RelocateObjects() {
#ifdef DEBUG
  ASSERT(state_ == UPDATE_POINTERS);
  state_ = RELOCATE_OBJECTS;
#endif
  // Relocates objects, always relocate map objects first. Relocating
  // objects in other space relies on map objects to get object size.
  int live_maps_size = IterateLiveObjects(
      heap()->map_space(), &MarkCompactCollector::RelocateMapObject);
  int live_pointer_olds_size = IterateLiveObjects(
      heap()->old_pointer_space(),
      &MarkCompactCollector::RelocateOldPointerObject);
  int live_data_olds_size = IterateLiveObjects(
      heap()->old_data_space(), &MarkCompactCollector::RelocateOldDataObject);
  int live_codes_size = IterateLiveObjects(
      heap()->code_space(), &MarkCompactCollector::RelocateCodeObject);
  int live_cells_size = IterateLiveObjects(
      heap()->cell_space(), &MarkCompactCollector::RelocateCellObject);
  int live_news_size = IterateLiveObjects(
      heap()->new_space(), &MarkCompactCollector::RelocateNewObject);

  // The live-size totals are only consumed by the ASSERTs below; USE()
  // silences unused-variable warnings in release builds.
  USE(live_maps_size);
  USE(live_pointer_olds_size);
  USE(live_data_olds_size);
  USE(live_codes_size);
  USE(live_cells_size);
  USE(live_news_size);
  ASSERT(live_maps_size == live_map_objects_size_);
  ASSERT(live_data_olds_size == live_old_data_objects_size_);
  ASSERT(live_pointer_olds_size == live_old_pointer_objects_size_);
  ASSERT(live_codes_size == live_code_objects_size_);
  ASSERT(live_cells_size == live_cell_objects_size_);
  ASSERT(live_news_size == live_young_objects_size_);

  // Flip from and to spaces
  heap()->new_space()->Flip();

  heap()->new_space()->MCCommitRelocationInfo();

  // Set age_mark to bottom in to space
  Address mark = heap()->new_space()->bottom();
  heap()->new_space()->set_age_mark(mark);

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
    space->MCCommitRelocationInfo();

  heap()->CheckNewSpaceExpansionCriteria();
  heap()->IncrementYoungSurvivorsCounter(live_news_size);
}
3128
3129
// Relocates a single live map object to its forwarding address and
// returns its (fixed) size. The forwarding address must be read before
// the map word is restored, because both live in the same word.
int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset map pointer. The meta map object may not be copied yet so
  // Map::cast does not yet work.
  obj->set_map(reinterpret_cast<Map*>(HeapObject::FromAddress(map_addr)));

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    Map::kSize);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  // All maps have the same size.
  return Map::kSize;
}
3160
3161
// Restores a non-map object's real map pointer (overwriting the encoded
// forwarding word) and returns the object's size. Map::cast is safe here
// because map space has already been compacted when this runs. The
// `space` and `new_addr` arguments are used only for the debug checks.
static inline int RestoreMap(HeapObject* obj,
                             PagedSpace* space,
                             Address new_addr,
                             Address map_addr) {
  // This must be a non-map object, and the function relies on the
  // assumption that the Map space is compacted before the other paged
  // spaces (see RelocateObjects).

  // Reset map pointer.
  obj->set_map(Map::cast(HeapObject::FromAddress(map_addr)));

  int obj_size = obj->Size();
  ASSERT_OBJECT_SIZE(obj_size);

  // Compaction only slides objects toward lower space offsets.
  ASSERT(space->MCSpaceOffsetForAddress(new_addr) <=
         space->MCSpaceOffsetForAddress(obj->address()));

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", obj->address(), new_addr);
  }
#endif

  return obj_size;
}
3187
3188
// Relocates a live non-code object within the given paged space and
// returns its size. Old data space holds no pointers, so a plain block
// move suffices there; all other spaces also need their region (write
// barrier) marks updated. Emits profiler events for moved objects.
int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
                                                   PagedSpace* space) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(map_addr));

  // Get forwarding address before resetting map pointer.
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, space, new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    if (space == heap()->old_data_space()) {
      heap()->MoveBlock(new_addr, old_addr, obj_size);
    } else {
      heap()->MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                      old_addr,
                                                      obj_size);
    }
  }

  ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
3226
3227
ager@chromium.org9258b6b2008-09-11 09:11:10 +00003228int MarkCompactCollector::RelocateOldPointerObject(HeapObject* obj) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003229 return RelocateOldNonCodeObject(obj, heap()->old_pointer_space());
ager@chromium.org9258b6b2008-09-11 09:11:10 +00003230}
3231
3232
3233int MarkCompactCollector::RelocateOldDataObject(HeapObject* obj) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003234 return RelocateOldNonCodeObject(obj, heap()->old_data_space());
ager@chromium.org9258b6b2008-09-11 09:11:10 +00003235}
3236
3237
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00003238int MarkCompactCollector::RelocateCellObject(HeapObject* obj) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003239 return RelocateOldNonCodeObject(obj, heap()->cell_space());
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00003240}
3241
3242
// Relocates a live object in code space and returns its size. After the
// move, Code objects must have their internal (pc-relative and absolute)
// references fixed up via Relocate, and the logger is told about the move.
int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
  // Recover map pointer.
  MapWord encoding = obj->map_word();
  Address map_addr = encoding.DecodeMapAddress(heap()->map_space());
  ASSERT(heap()->map_space()->Contains(HeapObject::FromAddress(map_addr)));

  // Get forwarding address before resetting map pointer
  Address new_addr = GetForwardingAddressInOldSpace(obj);

  // Reset the map pointer.
  int obj_size = RestoreMap(obj, heap()->code_space(), new_addr, map_addr);

  Address old_addr = obj->address();

  if (new_addr != old_addr) {
    // Move contents.
    heap()->MoveBlock(new_addr, old_addr, obj_size);
  }

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsCode()) {
    // May also update inline cache target.
    Code::cast(copied_to)->Relocate(new_addr - old_addr);
    // Notify the logger that compiled code has moved.
    PROFILE(heap()->isolate(), CodeMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
3273
3274
// Relocates a live new-space object to its destination (either the other
// semispace or a promotion target in old space) and returns its size.
// The forwarding address is stored in from-space at the same offset the
// object occupies in to-space.
int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
  int obj_size = obj->Size();

  // Get forwarding address: read it out of the from-space slot that
  // mirrors this object's to-space offset.
  Address old_addr = obj->address();
  int offset = heap()->new_space()->ToSpaceOffsetForAddress(old_addr);

  Address new_addr =
      Memory::Address_at(heap()->new_space()->FromSpaceLow() + offset);

#ifdef DEBUG
  if (heap()->new_space()->FromSpaceContains(new_addr)) {
    // Staying in new space: objects can only slide toward lower offsets.
    ASSERT(heap()->new_space()->FromSpaceOffsetForAddress(new_addr) <=
           heap()->new_space()->ToSpaceOffsetForAddress(old_addr));
  } else {
    // Otherwise the object is being promoted to one of the old spaces.
    ASSERT(heap()->TargetSpace(obj) == heap()->old_pointer_space() ||
           heap()->TargetSpace(obj) == heap()->old_data_space());
  }
#endif

  // New and old addresses cannot overlap.
  if (heap()->InNewSpace(HeapObject::FromAddress(new_addr))) {
    heap()->CopyBlock(new_addr, old_addr, obj_size);
  } else {
    // Promotion into old space must also update region (write barrier) marks.
    heap()->CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
                                                    old_addr,
                                                    obj_size);
  }

#ifdef DEBUG
  if (FLAG_gc_verbose) {
    PrintF("relocate %p -> %p\n", old_addr, new_addr);
  }
#endif

  HeapObject* copied_to = HeapObject::FromAddress(new_addr);
  if (copied_to->IsSharedFunctionInfo()) {
    PROFILE(heap()->isolate(),
            SharedFunctionInfoMoveEvent(old_addr, new_addr));
  }
  HEAP_PROFILE(heap(), ObjectMoveEvent(old_addr, new_addr));

  return obj_size;
}
3319
3320
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003321void MarkCompactCollector::EnableCodeFlushing(bool enable) {
3322 if (enable) {
3323 if (code_flusher_ != NULL) return;
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003324 code_flusher_ = new CodeFlusher(heap()->isolate());
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00003325 } else {
3326 if (code_flusher_ == NULL) return;
3327 delete code_flusher_;
3328 code_flusher_ = NULL;
3329 }
3330}
3331
3332
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003333void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
3334 Isolate* isolate) {
erik.corry@gmail.com0511e242011-01-19 11:11:08 +00003335#ifdef ENABLE_GDB_JIT_INTERFACE
3336 if (obj->IsCode()) {
3337 GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
3338 }
3339#endif
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003340 if (obj->IsCode()) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00003341 PROFILE(isolate, CodeDeleteEvent(obj->address()));
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003342 }
sgjesse@chromium.orgb302e562010-02-03 11:26:59 +00003343}
3344
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00003345
erik.corry@gmail.com4a6c3272010-11-18 12:04:40 +00003346int MarkCompactCollector::SizeOfMarkedObject(HeapObject* obj) {
3347 MapWord map_word = obj->map_word();
3348 map_word.ClearMark();
3349 return obj->SizeFromMap(map_word.ToMap());
3350}
3351
3352
// One-time static setup: populates the dispatch tables of the visitor
// classes used during collection. Must run before any GC activity.
void MarkCompactCollector::Initialize() {
  StaticPointersToNewGenUpdatingVisitor::Initialize();
  StaticMarkingVisitor::Initialize();
}
3357
3358
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00003359} } // namespace v8::internal