// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "marking-thread.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
#include "sweeper-thread.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
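
// The patterns above encode an object's color in two adjacent mark bits,
// written <bit><next bit>. A minimal sketch of how such patterns are
// typically decoded (illustrative only, not the collector's own accessors):
//
//   bool IsWhite(MarkBit bit) { return !bit.Get(); }                 // "0?"
//   bool IsBlack(MarkBit bit) { return bit.Get() && !bit.Next().Get(); }
//   bool IsGrey(MarkBit bit)  { return bit.Get() && bit.Next().Get(); }
//
// "01" is impossible because the second (grey) bit is only ever set
// together with the first.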


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      compacting_(false),
      was_marked_incrementally_(false),
      sweeping_pending_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_maps_(NULL) { }


#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(HEAP->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps ||
        rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION ||
        !rinfo->target_object()->IsMap() ||
        !Map::cast(rinfo->target_object())->CanTransition()) {
      VisitPointer(rinfo->target_object_address());
    }
  }
};


static void VerifyMarking(Address bottom, Address top) {
  VerifyMarkingVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page, which allows us to
  // use page->area_start() as the start of the range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor;

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
 public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
          case JS_FUNCTION_TYPE:
            CheckContext(JSFunction::cast(object)->context());
            break;
          case JS_GLOBAL_PROXY_TYPE:
            CheckContext(JSGlobalProxy::cast(object)->native_context());
            break;
          case JS_GLOBAL_OBJECT_TYPE:
          case JS_BUILTINS_OBJECT_TYPE:
            CheckContext(GlobalObject::cast(object)->native_context());
            break;
          case JS_ARRAY_TYPE:
          case JS_DATE_TYPE:
          case JS_OBJECT_TYPE:
          case JS_REGEXP_TYPE:
            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
            break;
          case MAP_TYPE:
            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
            break;
          case FIXED_ARRAY_TYPE:
            if (object->IsContext()) {
              CheckContext(object);
            } else {
              FixedArray* array = FixedArray::cast(object);
              int length = array->length();
              // Set the array length to zero to prevent cycles while
              // iterating over array bodies; this is easier than
              // intrusive marking.
              array->set_length(0);
              array->IterateBody(
                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
              array->set_length(length);
            }
            break;
          case JS_GLOBAL_PROPERTY_CELL_TYPE:
          case JS_PROXY_TYPE:
          case JS_VALUE_TYPE:
          case TYPE_FEEDBACK_INFO_TYPE:
            object->Iterate(this);
            break;
          case DECLARED_ACCESSOR_INFO_TYPE:
          case EXECUTABLE_ACCESSOR_INFO_TYPE:
          case BYTE_ARRAY_TYPE:
          case CALL_HANDLER_INFO_TYPE:
          case CODE_TYPE:
          case FIXED_DOUBLE_ARRAY_TYPE:
          case HEAP_NUMBER_TYPE:
          case INTERCEPTOR_INFO_TYPE:
          case ODDBALL_TYPE:
          case SCRIPT_TYPE:
          case SHARED_FUNCTION_INFO_TYPE:
            break;
          default:
            UNREACHABLE();
        }
      }
    }
  }

 private:
  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    Context* native_context = Context::cast(context)->native_context();
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
    } else {
      CHECK_EQ(current_native_context_, native_context);
    }
  }

  Context* current_native_context_;
};


static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;
    Code::cast(object)->CodeIterateBody(&visitor);
  }
}
#endif


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
    if (FLAG_gdbjit) return false;
#endif

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space &&
        (mode == NON_INCREMENTAL_COMPACTION ||
         FLAG_incremental_code_compaction)) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakMaps();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!FLAG_collect_maps) ReattachInitialMaps();

#ifdef DEBUG
  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);
  }
#endif

#ifdef VERIFY_HEAP
  if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code &&
      heap()->weak_embedded_maps_verification_enabled()) {
    VerifyWeakEmbeddedMapsInOptimizedCode();
  }
  if (FLAG_collect_maps && FLAG_omit_prototype_checks_for_leaf_maps) {
    VerifyOmittedPrototypeChecks();
  }
#endif

  Finish();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }

  tracer_ = NULL;
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
    if (code->marked_for_deoptimization()) continue;
    code->VerifyEmbeddedMapsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedPrototypeChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedPrototypeChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}


void MarkCompactCollector::StartSweeperThreads() {
  sweeping_pending_ = true;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    heap()->isolate()->sweeper_threads()[i]->StartSweeping();
  }
}


void MarkCompactCollector::WaitUntilSweepingCompleted() {
  if (sweeping_pending_) {
    for (int i = 0; i < FLAG_sweeper_threads; i++) {
      heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread();
    }
    sweeping_pending_ = false;
    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
    StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
    heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
    heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
  }
}


intptr_t MarkCompactCollector::
    StealMemoryFromSweeperThreads(PagedSpace* space) {
  intptr_t freed_bytes = 0;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space);
  }
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
  return freed_bytes;
}


bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return heap()->isolate()->sweeper_threads() != NULL;
}


bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
  return sweeping_pending_;
}


void MarkCompactCollector::FinalizeSweeping() {
  ASSERT(sweeping_pending_ == false);
  ReleaseEvacuationCandidates();
  heap()->FreeQueuedChunks();
}
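
// A minimal sketch of how the sweeper-thread entry points above fit
// together over one collection cycle (illustrative only; the actual call
// sites elsewhere in the collector decide when each step runs):
//
//   collector->StartSweeperThreads();         // sweeping_pending_ = true
//   // ... mutator and sweeper threads run concurrently ...
//   collector->WaitUntilSweepingCompleted();  // join, steal freed memory
//   collector->FinalizeSweeping();            // release evacuated pages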


void MarkCompactCollector::MarkInParallel() {
  for (int i = 0; i < FLAG_marking_threads; i++) {
    heap()->isolate()->marking_threads()[i]->StartMarking();
  }
}


void MarkCompactCollector::WaitUntilMarkingCompleted() {
  for (int i = 0; i < FLAG_marking_threads; i++) {
    heap()->isolate()->marking_threads()[i]->WaitForMarkingThread();
  }
}


bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}
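
// For orientation, a summary of the transitions TransferMark performs
// (restating the code above, not additional behavior):
//
//   old color | effect at new_start                | returns
//   ----------+------------------------------------+--------
//   black     | marked black                       | true
//   grey      | pushed grey onto the marking deque | false
//   white     | left white                         | false
//
// The true return presumably tells the caller that live-byte accounting
// now applies to the object's new location.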


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise it returns a positive integer that
// gives an estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If the page was not swept, then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  FreeList::SizeStats sizes;
  space->CountFreeListItems(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}
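
// Worked example with hypothetical numbers: for a non-code page with
// area_size = 1000000, free-list sizes small_size_ = 20000 and
// medium_size_ = 50000 give
//   ratio = (20000 * 5 + 50000) * 100 / 1000000 = 15,
// which equals ratio_threshold, so the page reports 0 (not worth
// compacting); any additional small or medium free-list waste would tip
// it over the threshold.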


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free. We expect that half-empty pages
    // are easier to compact, so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;
  }

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages. We expect that almost empty
    // pages are even easier to compact, so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;
  }

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold));
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= ((over_reserved * 3) / 4)) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        FreeList::SizeStats sizes;
        space->CountFreeListItems(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += 2 * p->area_size() - free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}
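
// Example of the candidate limit with hypothetical numbers: a space of
// number_of_pages = 128 starts with sqrt(128 / 2.0) + 1 = 9 candidates;
// the first REDUCE_MEMORY_FOOTPRINT bump raises this to 11, the second
// doubles it (to 18, or 22 if both fired), and --stress-compaction or
// --always-compact lift it to the kMaxMaxEvacuationCandidates cap of 1000.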


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) {
    // Instead of waiting we could also abort the sweeper threads here.
    WaitUntilSweepingCompleted();
    FinalizeSweeping();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter {
 public:
  virtual bool TakeFunction(JSFunction* function) {
    return function->code()->marked_for_deoptimization();
  }
};


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  heap()->isolate()->stub_cache()->Clear();

  DeoptimizeMarkedCodeFilter filter;
  Deoptimizer::DeoptimizeAllFunctionsWith(&filter);
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object is marked black (mark bit pattern "10", see the
//          bit patterns at the top of this file).

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects are marked in the mark bitmap, indicating that the
// object has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->
        RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


bool CodeFlusher::ContainsCandidate(SharedFunctionInfo* shared_info) {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  while (candidate != NULL) {
    if (candidate == shared_info) return true;
    candidate = GetNextCandidate(candidate);
  }
  return false;
}


void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictCandidate(JSFunction* function) {
  ASSERT(!function->next_function_link()->IsUndefined());
  Object* undefined = isolate_->heap()->undefined_value();

  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(function);
  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  if (candidate == function) {
    next_candidate = GetNextCandidate(function);
    jsfunction_candidates_head_ = next_candidate;
    ClearNextCandidate(function, undefined);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == function) {
        next_candidate = GetNextCandidate(function);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(function, undefined);
        break;
      }

      candidate = next_candidate;
    }
  }
}


void CodeFlusher::EvictJSFunctionCandidates() {
  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(jsfunction_candidates_head_ == NULL);
}


void CodeFlusher::EvictSharedFunctionInfoCandidates() {
  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    EvictCandidate(candidate);
    candidate = next_candidate;
  }
  ASSERT(shared_function_info_candidates_head_ == NULL);
}


void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
  Heap* heap = isolate_->heap();

  JSFunction** slot = &jsfunction_candidates_head_;
  JSFunction* candidate = jsfunction_candidates_head_;
  while (candidate != NULL) {
    if (heap->InFromSpace(candidate)) {
      v->VisitPointer(reinterpret_cast<Object**>(slot));
    }
    candidate = GetNextCandidate(*slot);
    slot = GetNextCandidateSlot(*slot);
  }
}


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-internalized
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsInternalizedString() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


class MarkCompactMarkingVisitor
    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
 public:
  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
                                   Map* map, HeapObject* obj);

  static void ObjectStatsCountFixedArray(
      FixedArrayBase* fixed_array,
      FixedArraySubInstanceType fast_type,
      FixedArraySubInstanceType dictionary_type);

  template<MarkCompactMarkingVisitor::VisitorId id>
  class ObjectStatsTracker {
   public:
    static inline void Visit(Map* map, HeapObject* obj);
  };

  static void Initialize();

  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
  }

  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
    // Mark all objects pointed to in [start, end).
    const int kMinRangeForMarkingRecursion = 64;
    if (end - start >= kMinRangeForMarkingRecursion) {
      if (VisitUnmarkedObjects(heap, start, end)) return;
      // We are close to a stack overflow, so just mark the objects.
    }
    MarkCompactCollector* collector = heap->mark_compact_collector();
    for (Object** p = start; p < end; p++) {
      MarkObjectByPointer(collector, start, p);
    }
  }

  // Marks the object black and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
    MarkBit mark = Marking::MarkBitFrom(object);
    heap->mark_compact_collector()->MarkObject(object, mark);
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (!mark_bit.Get()) {
      heap->mark_compact_collector()->SetMark(object, mark_bit);
      return true;
    }
    return false;
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
                                         Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);
  }

  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end)) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }

  INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
    SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
    shared->BeforeVisitingPointers();
  }

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    if (weak_map->next() == Smi::FromInt(0)) {
      weak_map->set_next(collector->encountered_weak_maps());
      collector->set_encountered_weak_maps(weak_map);
    }

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::kTableOffset + kPointerSize,
        object_size);
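    // Together the two calls above visit the whole body except the table
    // slot itself, which is handled separately below.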

    // Mark the backing hash table without pushing it on the marking stack.
    Object* table_object = weak_map->table();
    if (!table_object->IsHashTable()) return;
    ObjectHashTable* table = ObjectHashTable::cast(table_object);
    Object** table_slot =
        HeapObject::RawField(weak_map, JSWeakMap::kTableOffset);
    MarkBit table_mark = Marking::MarkBitFrom(table);
    collector->RecordSlot(table_slot, table_slot, table);
    if (!table_mark.Get()) collector->SetMark(table, table_mark);
    // Recording the map slot can be skipped, because maps are not compacted.
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
    ASSERT(MarkCompactCollector::IsMarked(table->map()));
  }

 private:
  template<int id>
  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);

  // Code flushing support.

  static const int kRegExpCodeThreshold = 5;

  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                             code,
                             heap);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker.  This can happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object itself.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
                             heap);
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

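      // The saved generation acts as an age: for example, code parked when
      // sweep_generation() was 42 is flushed once the generation reaches
      // 47 (42 + kRegExpCodeThreshold), with wrap-around modulo 256.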
      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
      }
    }
  }


  // Works by storing the current sweep_generation (as a smi) in place of
  // the code object in the RegExp's data array, keeping a copy around that
  // can be reinstated if the RegExp is used again before flushing.  If the
  // code was not used for kRegExpCodeThreshold mark-sweep GCs, it is
  // flushed.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExp(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ASCII and two-byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExp(map, object);
  }

  static VisitorDispatchTable<Callback> non_count_table_;
};


void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
    FixedArrayBase* fixed_array,
    FixedArraySubInstanceType fast_type,
    FixedArraySubInstanceType dictionary_type) {
  Heap* heap = fixed_array->map()->GetHeap();
  if (fixed_array->map() != heap->fixed_cow_array_map() &&
      fixed_array->map() != heap->fixed_double_array_map() &&
      fixed_array != heap->empty_fixed_array()) {
    if (fixed_array->IsDictionary()) {
      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
                              dictionary_type,
                              fixed_array->Size());
    } else {
      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
                              fast_type,
                              fixed_array->Size());
    }
  }
}


void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
  Heap* heap = map->GetHeap();
  int object_size = obj->Size();
  heap->RecordObjectStats(map->instance_type(), -1, object_size);
  non_count_table_.GetVisitorById(id)(map, obj);
  if (obj->IsJSObject()) {
    JSObject* object = JSObject::cast(obj);
    ObjectStatsCountFixedArray(object->elements(),
                               FAST_ELEMENTS_SUB_TYPE,
                               DICTIONARY_ELEMENTS_SUB_TYPE);
    ObjectStatsCountFixedArray(object->properties(),
                               FAST_PROPERTIES_SUB_TYPE,
                               DICTIONARY_PROPERTIES_SUB_TYPE);
  }
}


template<MarkCompactMarkingVisitor::VisitorId id>
void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
    Map* map, HeapObject* obj) {
  ObjectStatsVisitBase(id, map, obj);
}


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitMap> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    Map* map_obj = Map::cast(obj);
    ASSERT(map->instance_type() == MAP_TYPE);
    DescriptorArray* array = map_obj->instance_descriptors();
    if (map_obj->owns_descriptors() &&
        array != heap->empty_descriptor_array()) {
      int fixed_array_size = array->Size();
      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
                              DESCRIPTOR_ARRAY_SUB_TYPE,
                              fixed_array_size);
    }
    if (map_obj->HasTransitionArray()) {
      int fixed_array_size = map_obj->transitions()->Size();
      heap->RecordObjectStats(FIXED_ARRAY_TYPE,
                              TRANSITION_ARRAY_SUB_TYPE,
                              fixed_array_size);
    }
    if (map_obj->code_cache() != heap->empty_fixed_array()) {
      heap->RecordObjectStats(
          FIXED_ARRAY_TYPE,
          MAP_CODE_CACHE_SUB_TYPE,
          FixedArray::cast(map_obj->code_cache())->Size());
    }
    ObjectStatsVisitBase(kVisitMap, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitCode> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    int object_size = obj->Size();
    ASSERT(map->instance_type() == CODE_TYPE);
    heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
    ObjectStatsVisitBase(kVisitCode, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
    if (sfi->scope_info() != heap->empty_fixed_array()) {
      heap->RecordObjectStats(
          FIXED_ARRAY_TYPE,
          SCOPE_INFO_SUB_TYPE,
          FixedArray::cast(sfi->scope_info())->Size());
    }
    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
  }
};


template<>
class MarkCompactMarkingVisitor::ObjectStatsTracker<
    MarkCompactMarkingVisitor::kVisitFixedArray> {
 public:
  static inline void Visit(Map* map, HeapObject* obj) {
    Heap* heap = map->GetHeap();
    FixedArray* fixed_array = FixedArray::cast(obj);
    if (fixed_array == heap->string_table()) {
      heap->RecordObjectStats(
          FIXED_ARRAY_TYPE,
          STRING_TABLE_SUB_TYPE,
          fixed_array->Size());
    }
    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
  }
};


void MarkCompactMarkingVisitor::Initialize() {
  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();

  table_.Register(kVisitJSRegExp,
                  &VisitRegExpAndFlushCode);

  if (FLAG_track_gc_object_stats) {
    // Copy the visitor table to make call-through possible.
    non_count_table_.CopyFrom(&table_);
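    // Reroute every visitor id to its stats-tracking wrapper; the wrapper
    // records the object's type and size and then calls through to the
    // original visitor saved in non_count_table_.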
#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
#undef VISITOR_ID_COUNT_FUNCTION
  }
}


VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
    MarkCompactMarkingVisitor::non_count_table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    MarkCompactMarkingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    MarkCompactMarkingVisitor::VisitPointers(heap_, start, end);
  }

 private:
  Heap* heap_;
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);
  }

 private:
  MarkCompactCollector* collector_;
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);
    }
  }

 private:
  MarkCompactCollector* collector_;
};


void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for a frame that has a pending lazy deoptimization,
    // StackFrame::unchecked_code will return the non-optimized code object
    // for the outermost function, while StackFrame::LookupCode will return
    // the actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
                                                          frame->LookupCode());
    }
  }
}


void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap() == Isolate::Current()->heap());

  // Enable code flushing for non-incremental cycles.
  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
    EnableCodeFlushing(!was_marked_incrementally_);
  }

  // If code flushing is disabled, there is no need to prepare for it.
  if (!is_code_flushing_enabled()) return;

  // Ensure that the empty descriptor array is marked.  The method
  // MarkDescriptorArray relies on it being marked before any other
  // descriptor array.
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
                               heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingDeque();
}


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();
    // Mark the object.
    collector_->SetMark(object, mark_bit);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    MarkCompactMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body.  May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingDeque();
  }

  MarkCompactCollector* collector_;
};


// Helper class for pruning the string table.
class StringTableCleaner : public ObjectVisitor {
 public:
  explicit StringTableCleaner(Heap* heap)
      : heap_(heap), pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject() &&
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
        // Check if the internalized string being pruned is external. We
        // need to delete the associated external data as this string is
        // going away.

        // Since no objects have yet been moved we can safely access the
        // map of the object.
        if (o->IsExternalString()) {
          heap_->FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to the_hole_value (as deleted).
        *p = heap_->the_hole_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }

 private:
  Heap* heap_;
  int pointers_removed_;
};


// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
      return object;
    } else {
      return NULL;
    }
  }
};


// Fill the marking stack with overflowed objects returned by the given
// iterator.  Stop when the marking stack is filled or the end of the space
// is reached, whichever comes first.
template<class T>
static void DiscoverGreyObjectsWithIterator(Heap* heap,
                                            MarkingDeque* marking_deque,
                                            T* it) {
  // The caller should ensure that the marking stack is initially not full,
  // so that we don't waste effort pointlessly scanning for objects.
  ASSERT(!marking_deque->IsFull());

  Map* filler_map = heap->one_pointer_filler_map();
  for (HeapObject* object = it->Next();
       object != NULL;
       object = it->Next()) {
    MarkBit markbit = Marking::MarkBitFrom(object);
    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
      Marking::GreyToBlack(markbit);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
    }
  }
}


static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);


static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) {
  ASSERT(!marking_deque->IsFull());
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  MarkBit::CellType* cells = p->markbits()->cells();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  Address cell_base = p->area_start();
  int cell_index = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(
          p->AddressToMarkbitIndex(cell_base)));

  for (;
       cell_index < last_cell_index;
       cell_index++, cell_base += 32 * kPointerSize) {
    ASSERT(static_cast<unsigned>(cell_index) ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(cell_base))));

    const MarkBit::CellType current_cell = cells[cell_index];
    if (current_cell == 0) continue;

    const MarkBit::CellType next_cell = cells[cell_index + 1];
    MarkBit::CellType grey_objects = current_cell &
        ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
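    // Each object owns a pair of consecutive mark bits: "10" is black and
    // "11" is grey (see the patterns asserted above). The expression keeps
    // a bit only if the bit after it is also set, so exactly the first
    // bits of grey objects survive. For example, with
    //   current_cell      = ...00000111  (grey object at offset 0,
    //                                     black object at offset 2)
    //   current_cell >> 1 = ...00000011
    // the conjunction yields grey_objects = ...00000011: bit 0 flags the
    // grey object, and the stray bit 1 is skipped because the loop below
    // advances two bits per object.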

    int offset = 0;
    while (grey_objects != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
      grey_objects >>= trailing_zeros;
      offset += trailing_zeros;
      MarkBit markbit(&cells[cell_index], 1 << offset, false);
      ASSERT(Marking::IsGrey(markbit));
      Marking::GreyToBlack(markbit);
      Address addr = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(addr);
      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
      marking_deque->PushBlack(object);
      if (marking_deque->IsFull()) return;
      offset += 2;
      grey_objects >>= 2;
    }

    grey_objects >>= (Bitmap::kBitsPerCell - 1);
  }
}


static void DiscoverGreyObjectsInSpace(Heap* heap,
                                       MarkingDeque* marking_deque,
                                       PagedSpace* space) {
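  // A conservatively swept space has no contiguous run of objects that a
  // HeapObjectIterator could walk, so fall back to scanning the mark
  // bitmap of every page instead.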
  if (!space->was_swept_conservatively()) {
    HeapObjectIterator it(space);
    DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
  } else {
    PageIterator it(space);
    while (it.has_next()) {
      Page* p = it.next();
      DiscoverGreyObjectsOnPage(marking_deque, p);
      if (marking_deque->IsFull()) return;
    }
  }
}


bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
                                                        Object** p) {
  Object* o = *p;
  ASSERT(o->IsHeapObject());
  HeapObject* heap_object = HeapObject::cast(o);
  MarkBit mark = Marking::MarkBitFrom(heap_object);
  return !mark.Get();
}


void MarkCompactCollector::MarkStringTable() {
  StringTable* string_table = heap()->string_table();
  // Mark the string table itself.
  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
  SetMark(string_table, string_table_mark);
  // Explicitly mark the prefix.
  MarkingVisitor marker(heap());
  string_table->IteratePrefix(&marker);
  ProcessMarkingDeque();
}


void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);

  // Handle the string table specially.
  MarkStringTable();

  // There may be overflowed objects in the heap.  Visit them now.
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}


void MarkCompactCollector::MarkImplicitRefGroups() {
  List<ImplicitRefGroup*>* ref_groups =
      heap()->isolate()->global_handles()->implicit_ref_groups();

  int last = 0;
  for (int i = 0; i < ref_groups->length(); i++) {
    ImplicitRefGroup* entry = ref_groups->at(i);
    ASSERT(entry != NULL);

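    // Keep groups whose parent is not marked yet: they are compacted
    // towards the front of the list and retried on the next invocation,
    // while Rewind() below drops everything already processed.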
    if (!IsMarked(*entry->parent_)) {
      (*ref_groups)[last++] = entry;
      continue;
    }

    Object*** children = entry->children_;
    // A parent object is marked, so mark all child heap objects.
    for (size_t j = 0; j < entry->length_; ++j) {
      if ((*children[j])->IsHeapObject()) {
        HeapObject* child = HeapObject::cast(*children[j]);
        MarkBit mark = Marking::MarkBitFrom(child);
        MarkObject(child, mark);
      }
    }

    // Once the entire group has been marked, dispose it because it's
    // not needed anymore.
    entry->Dispose();
  }
  ref_groups->Rewind(last);
}


// Mark all objects reachable from the objects on the marking stack.
// Before: the marking stack contains zero or more heap object pointers.
// After: the marking stack is empty, and all objects reachable from the
// marking stack have been marked, or are overflowed in the heap.
void MarkCompactCollector::EmptyMarkingDeque() {
  while (!marking_deque_.IsEmpty()) {
    while (!marking_deque_.IsEmpty()) {
      HeapObject* object = marking_deque_.Pop();
      ASSERT(object->IsHeapObject());
      ASSERT(heap()->Contains(object));
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      Map* map = object->map();
      MarkBit map_mark = Marking::MarkBitFrom(map);
      MarkObject(map, map_mark);

      MarkCompactMarkingVisitor::IterateBody(map, object);
    }

    // Process encountered weak maps, mark objects only reachable by those
    // weak maps and repeat until a fixed point is reached.
    ProcessWeakMaps();
  }
}


// Sweep the heap for overflowed objects, clear their overflow bits, and
// push them on the marking stack.  Stop early if the marking stack fills
// before sweeping completes.  If sweeping completes, there are no remaining
// overflowed objects in the heap so the overflow flag on the marking stack
// is cleared.
void MarkCompactCollector::RefillMarkingDeque() {
  ASSERT(marking_deque_.overflowed());

  SemiSpaceIterator new_it(heap()->new_space());
  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it);
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_pointer_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->old_data_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->code_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->map_space());
  if (marking_deque_.IsFull()) return;

  DiscoverGreyObjectsInSpace(heap(),
                             &marking_deque_,
                             heap()->cell_space());
  if (marking_deque_.IsFull()) return;

  LargeObjectIterator lo_it(heap()->lo_space());
  DiscoverGreyObjectsWithIterator(heap(),
                                  &marking_deque_,
                                  &lo_it);
  if (marking_deque_.IsFull()) return;

  marking_deque_.ClearOverflowed();
}


// Mark all objects reachable (transitively) from objects on the marking
// stack.  Before: the marking stack contains zero or more heap object
// pointers.  After: the marking stack is empty and there are no overflowed
// objects in the heap.
void MarkCompactCollector::ProcessMarkingDeque() {
  EmptyMarkingDeque();
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }
}


void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
  bool work_to_do = true;
  ASSERT(marking_deque_.IsEmpty());
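  // Marking one group of objects can make the parents of other groups
  // reachable, so iterate until no new objects show up on the marking
  // deque.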
  while (work_to_do) {
    heap()->isolate()->global_handles()->IterateObjectGroups(
        visitor, &IsUnmarkedHeapObjectWithHeap);
    MarkImplicitRefGroups();
    work_to_do = !marking_deque_.IsEmpty();
    ProcessMarkingDeque();
  }
}


void MarkCompactCollector::MarkLiveObjects() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system.  JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(heap()->isolate());

  bool incremental_marking_overflowed = false;
  IncrementalMarking* incremental_marking = heap_->incremental_marking();
  if (was_marked_incrementally_) {
    // Finalize the incremental marking and check whether we had an overflow.
    // Both markers use grey color to mark overflowed objects, so the
    // non-incremental marker can deal with them as if the overflow had
    // occurred during normal marking.  But the incremental marker uses a
    // separate marking deque, so we have to explicitly copy its overflow
    // state.
    incremental_marking->Finalize();
    incremental_marking_overflowed =
        incremental_marking->marking_deque()->overflowed();
    incremental_marking->marking_deque()->ClearOverflowed();
  } else {
    // Abort any pending incremental activities e.g. incremental sweeping.
    incremental_marking->Abort();
  }

#ifdef DEBUG
  ASSERT(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif
  // The to space contains live objects, so a page in from space is used as
  // the marking stack.
  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
  if (FLAG_force_marking_deque_overflows) {
    marking_deque_end = marking_deque_start + 64 * kPointerSize;
  }
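  // With the flag set, the deque shrinks to 64 words (256 bytes on a
  // 32-bit build), which makes the overflow paths easy to exercise in
  // tests.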
  marking_deque_.Initialize(marking_deque_start,
                            marking_deque_end);
  ASSERT(!marking_deque_.overflowed());

  if (incremental_marking_overflowed) {
    // There are overflowed objects left in the heap after incremental marking.
    marking_deque_.SetOverflowed();
  }

  PrepareForCodeFlushing();

  if (was_marked_incrementally_) {
    // There is no write barrier on cells so we have to scan them now at the
    // end of the incremental marking.
    {
      HeapObjectIterator cell_iterator(heap()->cell_space());
      HeapObject* cell;
      while ((cell = cell_iterator.Next()) != NULL) {
        ASSERT(cell->IsJSGlobalPropertyCell());
        if (IsMarked(cell)) {
          int offset = JSGlobalPropertyCell::kValueOffset;
          MarkCompactMarkingVisitor::VisitPointer(
              heap(),
              reinterpret_cast<Object**>(cell->address() + offset));
        }
      }
    }
  }

  RootMarkingVisitor root_visitor(heap());
  MarkRoots(&root_visitor);

  // The objects reachable from the roots are marked, yet unreachable
  // objects are unmarked.  Mark objects reachable due to host
  // application-specific logic.
  ProcessExternalMarking(&root_visitor);

  // The objects reachable from the roots or object groups are marked,
  // yet unreachable objects are unmarked.  Mark objects reachable
  // only from weak global handles.
  //
  // First we identify nonlive weak handles and mark them as pending
  // destruction.
  heap()->isolate()->global_handles()->IdentifyWeakHandles(
      &IsUnmarkedHeapObject);
  // Then we mark the objects and process the transitive closure.
  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
  while (marking_deque_.overflowed()) {
    RefillMarkingDeque();
    EmptyMarkingDeque();
  }

  // Repeat host application-specific marking to mark unmarked objects
  // reachable from the weak roots.
  ProcessExternalMarking(&root_visitor);

  AfterMarking();
}


void MarkCompactCollector::AfterMarking() {
  // Object literal map caches reference strings (cache keys) and maps
  // (cache values).  At this point, still-useful maps have already been
  // marked.  Mark the keys for the live values before we process the
  // string table.
  ProcessMapCaches();

  // Prune the string table removing all strings only pointed to by the
  // string table.  Cannot use string_table() here because the string
  // table is marked.
  StringTable* string_table = heap()->string_table();
  StringTableCleaner v(heap());
  string_table->IterateElements(&v);
  string_table->ElementsRemoved(v.PointersRemoved());
  heap()->external_string_table_.Iterate(&v);
  heap()->external_string_table_.CleanUp();
  heap()->error_object_list_.RemoveUnmarked(heap());

  // Process the weak references.
  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
  heap()->ProcessWeakReferences(&mark_compact_object_retainer);

  // Remove object groups after marking phase.
  heap()->isolate()->global_handles()->RemoveObjectGroups();
  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();

  // Flush code from collected candidates.
  if (is_code_flushing_enabled()) {
    code_flusher_->ProcessCandidates();
    // If the incremental marker does not support code flushing, we need to
    // disable it before the incremental marking steps of the next cycle.
    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
      EnableCodeFlushing(false);
    }
  }

  if (!FLAG_watch_ic_patching) {
    // Clean up dead objects from the runtime profiler.
    heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
  }

  if (FLAG_track_gc_object_stats) {
    heap()->CheckpointObjectStats();
  }
}


void MarkCompactCollector::ProcessMapCaches() {
  Object* raw_context = heap()->native_contexts_list_;
  while (raw_context != heap()->undefined_value()) {
    Context* context = reinterpret_cast<Context*>(raw_context);
    if (IsMarked(context)) {
      HeapObject* raw_map_cache =
          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
      // A map cache may be reachable from the stack. In this case
      // it's already transitively marked and it's too late to clean
      // up its parts.
      if (!IsMarked(raw_map_cache) &&
          raw_map_cache != heap()->undefined_value()) {
        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
        int existing_elements = map_cache->NumberOfElements();
        int used_elements = 0;
        for (int i = MapCache::kElementsStartIndex;
             i < map_cache->length();
             i += MapCache::kEntrySize) {
2243 Object* raw_key = map_cache->get(i);
2244 if (raw_key == heap()->undefined_value() ||
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00002245 raw_key == heap()->the_hole_value()) continue;
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002246 STATIC_ASSERT(MapCache::kEntrySize == 2);
2247 Object* raw_map = map_cache->get(i + 1);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002248 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002249 ++used_elements;
2250 } else {
2251 // Delete useless entries with unmarked maps.
2252 ASSERT(raw_map->IsMap());
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00002253 map_cache->set_the_hole(i);
2254 map_cache->set_the_hole(i + 1);
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002255 }
2256 }
2257 if (used_elements == 0) {
2258 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2259 } else {
2260 // Note: we don't actually shrink the cache here to avoid
2261 // extra complexity during GC. We rely on subsequent cache
2262 // usages (EnsureCapacity) to do this.
2263 map_cache->ElementsRemoved(existing_elements - used_elements);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002264 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2265 MarkObject(map_cache, map_cache_markbit);
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002266 }
2267 }
2268 }
2269 // Move to next element in the list.
2270 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2271 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002272 ProcessMarkingDeque();
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002273}
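

// Illustrative sketch (not used by the collector): the hole-punching scheme
// ProcessMapCaches() applies to a MapCache, shown on a plain array of
// entries. All names and types here are hypothetical; V8 stores map cache
// entries in a FixedArray and marks holes with the_hole_value() instead.
template<typename Entry, typename IsLive>
static int ClearDeadCacheEntries(Entry* entries, int length, bool* holes,
                                 IsLive is_live) {
  int used = 0;
  for (int i = 0; i < length; i++) {
    if (is_live(entries[i])) {
      used++;  // Live entry stays in place.
    } else {
      holes[i] = true;  // Analogous to map_cache->set_the_hole(i).
    }
  }
  // The caller decides, as above, whether to drop the whole cache
  // (used == 0) or just account for the removed elements.
  return used;
}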


void MarkCompactCollector::ReattachInitialMaps() {
  HeapObjectIterator map_iterator(heap()->map_space());
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL;
       obj = map_iterator.Next()) {
    if (obj->IsFreeSpace()) continue;
    Map* map = Map::cast(obj);

    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;

    if (map->attached_to_shared_function_info()) {
      JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
    }
  }
}


void MarkCompactCollector::ClearNonLiveReferences() {
  HeapObjectIterator map_iterator(heap()->map_space());
  // Iterate over the map space, setting map transitions that go from
  // a marked map to an unmarked map to null transitions. This action
  // is carried out only on maps of JSObjects and related subtypes.
  for (HeapObject* obj = map_iterator.Next();
       obj != NULL; obj = map_iterator.Next()) {
    Map* map = reinterpret_cast<Map*>(obj);
    MarkBit map_mark = Marking::MarkBitFrom(map);
    if (map->IsFreeSpace()) continue;

    ASSERT(map->IsMap());
    if (!map->CanTransition()) continue;

    if (map_mark.Get() &&
        map->attached_to_shared_function_info()) {
      // This map is used for inobject slack tracking and has been detached
      // from SharedFunctionInfo during the mark phase.
      // Since it survived the GC, reattach it now.
      map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map);
    }

    ClearNonLivePrototypeTransitions(map);
    ClearNonLiveMapTransitions(map, map_mark);

    if (map_mark.Get()) {
      ClearNonLiveDependentCode(map);
    } else {
      ClearAndDeoptimizeDependentCode(map);
    }
  }
}


void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
  int number_of_transitions = map->NumberOfProtoTransitions();
  FixedArray* prototype_transitions = map->GetPrototypeTransitions();

  int new_number_of_transitions = 0;
  const int header = Map::kProtoTransitionHeaderSize;
  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
  const int map_offset = header + Map::kProtoTransitionMapOffset;
  const int step = Map::kProtoTransitionElementsPerEntry;
  for (int i = 0; i < number_of_transitions; i++) {
    Object* prototype = prototype_transitions->get(proto_offset + i * step);
    Object* cached_map = prototype_transitions->get(map_offset + i * step);
    if (IsMarked(prototype) && IsMarked(cached_map)) {
      int proto_index = proto_offset + new_number_of_transitions * step;
      int map_index = map_offset + new_number_of_transitions * step;
      if (new_number_of_transitions != i) {
        prototype_transitions->set_unchecked(
            heap_,
            proto_index,
            prototype,
            UPDATE_WRITE_BARRIER);
        prototype_transitions->set_unchecked(
            heap_,
            map_index,
            cached_map,
            SKIP_WRITE_BARRIER);
      }
      Object** slot =
          HeapObject::RawField(prototype_transitions,
                               FixedArray::OffsetOfElementAt(proto_index));
      RecordSlot(slot, slot, prototype);
      new_number_of_transitions++;
    }
  }

  if (new_number_of_transitions != number_of_transitions) {
    map->SetNumberOfProtoTransitions(new_number_of_transitions);
  }

  // Fill slots that became free with undefined value.
  for (int i = new_number_of_transitions * step;
       i < number_of_transitions * step;
       i++) {
    prototype_transitions->set_undefined(heap_, header + i);
  }
}


void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
                                                      MarkBit map_mark) {
  Object* potential_parent = map->GetBackPointer();
  if (!potential_parent->IsMap()) return;
  Map* parent = Map::cast(potential_parent);

  // Follow the back pointer to check whether we are dealing with a map
  // transition from a live map to a dead path; in that case clear the
  // transitions of the parent.
  bool current_is_alive = map_mark.Get();
  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
  if (!current_is_alive && parent_is_alive) {
    parent->ClearNonLiveTransitions(heap());
  }
}
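

// Illustrative sketch (hypothetical types, not part of the collector): the
// parent/child liveness test above, in isolation. A dead child map reachable
// through a back pointer from a live parent is the signal that the parent's
// transition array still references garbage and must be pruned.
struct ExampleMapNode {
  ExampleMapNode* parent;  // Back pointer; NULL for root maps.
  bool marked;
};

static bool ShouldClearParentTransitions(const ExampleMapNode* map) {
  if (map->parent == NULL) return false;
  // Live parent, dead child: the transition from parent to child is stale.
  return !map->marked && map->parent->marked;
}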


void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) {
  AssertNoAllocation no_allocation_scope;
  DependentCode* entries = map->dependent_code();
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  for (int i = 0; i < number_of_entries; i++) {
    Code* code = entries->code_at(i);
    if (IsMarked(code) && !code->marked_for_deoptimization()) {
      code->set_marked_for_deoptimization(true);
    }
    entries->clear_code_at(i);
  }
  map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
}


void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) {
  AssertNoAllocation no_allocation_scope;
  DependentCode* entries = map->dependent_code();
  DependentCode::GroupStartIndexes starts(entries);
  int number_of_entries = starts.number_of_entries();
  if (number_of_entries == 0) return;
  int new_number_of_entries = 0;
  // Go through all groups, removing dead code objects and compacting.
  for (int g = 0; g < DependentCode::kGroupCount; g++) {
    int group_number_of_entries = 0;
    for (int i = starts.at(g); i < starts.at(g + 1); i++) {
      Code* code = entries->code_at(i);
      if (IsMarked(code) && !code->marked_for_deoptimization()) {
        if (new_number_of_entries + group_number_of_entries != i) {
          entries->set_code_at(new_number_of_entries +
                               group_number_of_entries, code);
        }
        Object** slot = entries->code_slot_at(new_number_of_entries +
                                              group_number_of_entries);
        RecordSlot(slot, slot, code);
        group_number_of_entries++;
      }
    }
    entries->set_number_of_entries(
        static_cast<DependentCode::DependencyGroup>(g),
        group_number_of_entries);
    new_number_of_entries += group_number_of_entries;
  }
  for (int i = new_number_of_entries; i < number_of_entries; i++) {
    entries->clear_code_at(i);
  }
}
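

// Illustrative sketch (hypothetical layout, not part of the collector):
// the grouped in-place compaction done by ClearNonLiveDependentCode(),
// written for a flat int array partitioned into groups. Live elements
// slide left and per-group counts are recomputed so that the group
// boundaries stay consistent after the dead entries disappear.
static int CompactGroupedArray(int* elements, const int* starts,
                               int group_count, int* group_sizes,
                               bool (*is_live)(int)) {
  int new_length = 0;
  for (int g = 0; g < group_count; g++) {
    int group_size = 0;
    for (int i = starts[g]; i < starts[g + 1]; i++) {
      if (is_live(elements[i])) {
        elements[new_length + group_size] = elements[i];  // Slide down.
        group_size++;
      }
    }
    group_sizes[g] = group_size;
    new_length += group_size;
  }
  // Entries at [new_length, old length) are stale and should be cleared,
  // as the final loop above does with clear_code_at().
  return new_length;
}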


void MarkCompactCollector::ProcessWeakMaps() {
  Object* weak_map_obj = encountered_weak_maps();
  while (weak_map_obj != Smi::FromInt(0)) {
    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
    Object** anchor = reinterpret_cast<Object**>(table->address());
    for (int i = 0; i < table->Capacity(); i++) {
      if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
        Object** key_slot =
            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
                ObjectHashTable::EntryToIndex(i)));
        RecordSlot(anchor, key_slot, *key_slot);
        Object** value_slot =
            HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
                ObjectHashTable::EntryToValueIndex(i)));
        MarkCompactMarkingVisitor::MarkObjectByPointer(
            this, anchor, value_slot);
      }
    }
    weak_map_obj = weak_map->next();
  }
}


void MarkCompactCollector::ClearWeakMaps() {
  Object* weak_map_obj = encountered_weak_maps();
  while (weak_map_obj != Smi::FromInt(0)) {
    ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj)));
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj);
    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
    for (int i = 0; i < table->Capacity(); i++) {
      if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
        table->RemoveEntry(i);
      }
    }
    weak_map_obj = weak_map->next();
    weak_map->set_next(Smi::FromInt(0));
  }
  set_encountered_weak_maps(Smi::FromInt(0));
}
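

// Illustrative sketch (hypothetical types, not part of the collector): the
// weak-in-keys rule the two functions above implement. ProcessWeakMaps()
// marks values only for entries whose key is already marked, and
// ClearWeakMaps() then drops every entry whose key stayed unmarked.
struct ExampleWeakEntry {
  void* key;
  void* value;
  bool key_is_marked;
  bool removed;
};

static void ClearDeadWeakEntries(ExampleWeakEntry* entries, int capacity) {
  for (int i = 0; i < capacity; i++) {
    if (!entries[i].key_is_marked) {
      // Analogous to table->RemoveEntry(i): the value is never retained
      // solely because it is stored in the weak map.
      entries[i].removed = true;
    }
  }
}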


// We scavenge new space simultaneously with sweeping. This is done in two
// passes.
//
// The first pass migrates all alive objects from one semispace to another or
// promotes them to old space. The forwarding address is written directly into
// the first word of the object without any encoding. If an object is dead we
// write NULL as its forwarding address.
//
// The second pass updates pointers to new space in all spaces. It is possible
// to encounter pointers to dead new space objects during traversal of pointers
// to new space. We should clear them to avoid encountering them during the
// next pointer iteration. This is an issue if the store buffer overflows and
// we have to scan the entire old space, including dead objects, looking for
// pointers to new space.
void MarkCompactCollector::MigrateObject(Address dst,
                                         Address src,
                                         int size,
                                         AllocationSpace dest) {
  HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
  if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) {
    Address src_slot = src;
    Address dst_slot = dst;
    ASSERT(IsAligned(size, kPointerSize));

    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
      Object* value = Memory::Object_at(src_slot);

      Memory::Object_at(dst_slot) = value;

      if (heap_->InNewSpace(value)) {
        heap_->store_buffer()->Mark(dst_slot);
      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           reinterpret_cast<Object**>(dst_slot),
                           SlotsBuffer::IGNORE_OVERFLOW);
      }

      src_slot += kPointerSize;
      dst_slot += kPointerSize;
    }

    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
      Address code_entry = Memory::Address_at(code_entry_slot);

      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
        SlotsBuffer::AddTo(&slots_buffer_allocator_,
                           &migration_slots_buffer_,
                           SlotsBuffer::CODE_ENTRY_SLOT,
                           code_entry_slot,
                           SlotsBuffer::IGNORE_OVERFLOW);
      }
    }
  } else if (dest == CODE_SPACE) {
    PROFILE(heap()->isolate(), CodeMoveEvent(src, dst));
    heap()->MoveBlock(dst, src, size);
    SlotsBuffer::AddTo(&slots_buffer_allocator_,
                       &migration_slots_buffer_,
                       SlotsBuffer::RELOCATED_CODE_OBJECT,
                       dst,
                       SlotsBuffer::IGNORE_OVERFLOW);
    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
  } else {
    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
    heap()->MoveBlock(dst, src, size);
  }
  Memory::Address_at(src) = dst;
}
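

// Illustrative sketch (hypothetical helpers, not part of the collector):
// the unencoded forwarding scheme described in the comment above.
// MigrateObject() stores the destination address in the first word of the
// old object, so the second pass can recover it with a plain load; NULL in
// that word marks a dead object.
static void WriteForwardingAddress(void* old_object, void* new_object) {
  *reinterpret_cast<void**>(old_object) = new_object;  // First word.
}

static void* ReadForwardingAddress(void* old_object) {
  // Returns NULL if the object was dead (see EvacuateNewSpace() below).
  return *reinterpret_cast<void**>(old_object);
}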


// Visitor for updating pointers from live objects in old spaces to new space.
// It does not expect to encounter pointers to dead objects.
class PointersUpdatingVisitor: public ObjectVisitor {
 public:
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    UpdatePointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) UpdatePointer(p);
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    Object* target = rinfo->target_object();
    Object* old_target = target;
    VisitPointer(&target);
    // Avoid changes that would unnecessarily flush the instruction cache.
    if (target != old_target) {
      rinfo->set_target_object(target);
    }
  }

  void VisitCodeTarget(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    Object* old_target = target;
    VisitPointer(&target);
    if (target != old_target) {
      rinfo->set_target_address(Code::cast(target)->instruction_start());
    }
  }

  void VisitCodeAgeSequence(RelocInfo* rinfo) {
    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
    Object* stub = rinfo->code_age_stub();
    ASSERT(stub != NULL);
    VisitPointer(&stub);
    if (stub != rinfo->code_age_stub()) {
      rinfo->set_code_age_stub(Code::cast(stub));
    }
  }

  void VisitDebugTarget(RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    VisitPointer(&target);
    rinfo->set_call_address(Code::cast(target)->instruction_start());
  }

  static inline void UpdateSlot(Heap* heap, Object** slot) {
    Object* obj = *slot;

    if (!obj->IsHeapObject()) return;

    HeapObject* heap_obj = HeapObject::cast(obj);

    MapWord map_word = heap_obj->map_word();
    if (map_word.IsForwardingAddress()) {
      ASSERT(heap->InFromSpace(heap_obj) ||
             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
      HeapObject* target = map_word.ToForwardingAddress();
      *slot = target;
      ASSERT(!heap->InFromSpace(target) &&
             !MarkCompactCollector::IsOnEvacuationCandidate(target));
    }
  }

 private:
  inline void UpdatePointer(Object** p) {
    UpdateSlot(heap_, p);
  }

  Heap* heap_;
};


static void UpdatePointer(HeapObject** p, HeapObject* object) {
  ASSERT(*p == object);

  Address old_addr = object->address();

  Address new_addr = Memory::Address_at(old_addr);

  // The new space sweep will overwrite the map word of dead objects
  // with NULL. In this case we do not need to transfer this entry to
  // the store buffer which we are rebuilding.
  if (new_addr != NULL) {
    *p = HeapObject::FromAddress(new_addr);
  } else {
    // We have to zap this pointer, because the store buffer may overflow
    // later, and then we have to scan the entire heap and we don't want to
    // find spurious new-space pointers in the old space.
    // TODO(mstarzinger): This was changed to a sentinel value to track down
    // rare crashes, change it back to Smi::FromInt(0) later.
    *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1));  // flood
  }
}


static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                         Object** p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    return String::cast(map_word.ToForwardingAddress());
  }

  return String::cast(*p);
}


bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
                                            int object_size) {
  Object* result;

  if (object_size > Page::kMaxNonCodeHeapObjectSize) {
    MaybeObject* maybe_result =
        heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE);
    if (maybe_result->ToObject(&result)) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    LO_SPACE);
      heap()->mark_compact_collector()->tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  } else {
    OldSpace* target_space = heap()->TargetSpace(object);

    ASSERT(target_space == heap()->old_pointer_space() ||
           target_space == heap()->old_data_space());
    MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
    if (maybe_result->ToObject(&result)) {
      HeapObject* target = HeapObject::cast(result);
      MigrateObject(target->address(),
                    object->address(),
                    object_size,
                    target_space->identity());
      heap()->mark_compact_collector()->tracer()->
          increment_promoted_objects_size(object_size);
      return true;
    }
  }

  return false;
}


void MarkCompactCollector::EvacuateNewSpace() {
  // There are soft limits in the allocation code, designed to trigger a
  // mark-sweep collection by failing allocations. But since we are already
  // in a mark-sweep allocation, there is no sense in trying to trigger one.
  AlwaysAllocateScope scope;
  heap()->CheckNewSpaceExpansionCriteria();

  NewSpace* new_space = heap()->new_space();

  // Store allocation range before flipping semispaces.
  Address from_bottom = new_space->bottom();
  Address from_top = new_space->top();

  // Flip the semispaces. After flipping, to space is empty, from space has
  // live objects.
  new_space->Flip();
  new_space->ResetAllocationInfo();

  int survivors_size = 0;

  // First pass: traverse all objects in inactive semispace, remove marks,
  // migrate live objects and write forwarding addresses. This stage puts
  // new entries in the store buffer and may cause some pages to be marked
  // scan-on-scavenge.
  SemiSpaceIterator from_it(from_bottom, from_top);
  for (HeapObject* object = from_it.Next();
       object != NULL;
       object = from_it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) {
      mark_bit.Clear();
      // Don't bother decrementing the live bytes count. We'll discard the
      // entire page at the end.
      int size = object->Size();
      survivors_size += size;

      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate object to another semispace.
      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        if (!new_space->AddFreshPage()) {
          // Shouldn't happen. We are sweeping linearly, and to-space
          // has the same number of pages as from-space, so there is
          // always room.
          UNREACHABLE();
        }
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      }
      Object* target = allocation->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target)->address(),
                    object->address(),
                    size,
                    NEW_SPACE);
    } else {
      // Mark dead objects in the new space with null in their map field.
      Memory::Address_at(object->address()) = NULL;
    }
  }

  heap_->IncrementYoungSurvivorsCounter(survivors_size);
  new_space->set_age_mark(new_space->top());
}
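

// Illustrative sketch (hypothetical allocator interface, not part of the
// collector): the allocation-retry pattern used in the loop above. A
// to-space allocation can fail only at a page boundary; adding a fresh page
// and retrying must then succeed, because to-space has as many pages as
// from-space for a linear sweep.
struct ExampleToSpace {
  void* (*allocate)(int size);   // Returns NULL when the current page fills.
  bool (*add_fresh_page)();
};

static void* AllocateInToSpaceForEvacuation(ExampleToSpace* to_space,
                                            int size) {
  void* result = to_space->allocate(size);
  if (result == NULL) {
    bool added = to_space->add_fresh_page();
    ASSERT(added);  // Mirrors the UNREACHABLE() case above.
    USE(added);
    result = to_space->allocate(size);
    ASSERT(result != NULL);
  }
  return result;
}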


void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
  AlwaysAllocateScope always_allocate;
  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptPrecisely();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  Address cell_base = p->area_start();
  int cell_index = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(
          p->AddressToMarkbitIndex(cell_base)));

  int offsets[16];

  for (;
       cell_index < last_cell_index;
       cell_index++, cell_base += 32 * kPointerSize) {
    ASSERT(static_cast<unsigned>(cell_index) ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(cell_base))));
    if (cells[cell_index] == 0) continue;

    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
    for (int i = 0; i < live_objects; i++) {
      Address object_addr = cell_base + offsets[i] * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(object_addr);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));

      int size = object->Size();

      MaybeObject* target = space->AllocateRaw(size);
      if (target->IsFailure()) {
        // OS refused to give us memory.
        V8::FatalProcessOutOfMemory("Evacuation");
        return;
      }

      Object* target_object = target->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target_object)->address(),
                    object_addr,
                    size,
                    space->identity());
      ASSERT(object->map_word().IsForwardingAddress());
    }

    // Clear marking bits for current cell.
    cells[cell_index] = 0;
  }
  p->ResetLiveBytes();
}
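

// Illustrative sketch (not used by the collector): recovering object start
// addresses from one 32-bit mark-bit cell, where bit i corresponds to the
// word at cell_base + i * kPointerSize. This is the per-cell step of the
// loop above, written as a plain bit scan instead of the kStartTable lookup
// that MarkWordToObjectStarts() performs.
static int CellToObjectAddresses(uint32_t cell, Address cell_base,
                                 Address* out, int max_out) {
  int count = 0;
  for (int bit = 0; bit < 32 && count < max_out; bit++) {
    if ((cell & (1u << bit)) != 0) {
      out[count++] = cell_base + bit * kPointerSize;  // Object start.
    }
  }
  return count;
}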


void MarkCompactCollector::EvacuatePages() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    ASSERT(p->IsEvacuationCandidate() ||
           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
    if (p->IsEvacuationCandidate()) {
      // During compaction we might have to request a new page.
      // Check that the space still has room for that.
      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
        EvacuateLiveObjectsFromPage(p);
      } else {
        // Without room for expansion evacuation is not guaranteed to succeed.
        // Pessimistically abandon unevacuated pages.
        for (int j = i; j < npages; j++) {
          Page* page = evacuation_candidates_[j];
          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
          page->ClearEvacuationCandidate();
          page->SetFlag(Page::RESCAN_ON_EVACUATION);
        }
        return;
      }
    }
  }
}


class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};


static inline void UpdateSlot(ObjectVisitor* v,
                              SlotsBuffer::SlotType slot_type,
                              Address addr) {
  switch (slot_type) {
    case SlotsBuffer::CODE_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
      rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::CODE_ENTRY_SLOT: {
      v->VisitCodeEntry(addr);
      break;
    }
    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
      HeapObject* obj = HeapObject::FromAddress(addr);
      Code::cast(obj)->CodeIterateBody(v);
      break;
    }
    case SlotsBuffer::DEBUG_TARGET_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::JS_RETURN_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v);
      break;
    }
    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
      rinfo.Visit(v);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


enum SweepingMode {
  SWEEP_ONLY,
  SWEEP_AND_VISIT_LIVE_OBJECTS
};


enum SkipListRebuildingMode {
  REBUILD_SKIP_LIST,
  IGNORE_SKIP_LIST
};


// Sweep a space precisely. After this has been done the space can
// be iterated precisely, hitting only the live objects. Code space
// is always swept precisely because we want to be able to iterate
// over it. Map space is swept precisely, because it is not compacted.
// Slots in live objects pointing into evacuation candidates are updated
// if requested.
template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
static void SweepPrecisely(PagedSpace* space,
                           Page* p,
                           ObjectVisitor* v) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
            space->identity() == CODE_SPACE);
  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));

  double start_time = 0.0;
  if (FLAG_print_cumulative_gc_stat) {
    start_time = OS::TimeCurrentMillis();
  }

  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptPrecisely();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  Address free_start = p->area_start();
  int cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(free_start)));

  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
  Address object_address = free_start;
  int offsets[16];

  SkipList* skip_list = p->skip_list();
  int curr_region = -1;
  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
    skip_list->Clear();
  }

  for (;
       cell_index < last_cell_index;
       cell_index++, object_address += 32 * kPointerSize) {
    ASSERT(static_cast<unsigned>(cell_index) ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(object_address))));
    int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets);
    int live_index = 0;
    for ( ; live_objects != 0; live_objects--) {
      Address free_end = object_address + offsets[live_index++] * kPointerSize;
      if (free_end != free_start) {
        space->Free(free_start, static_cast<int>(free_end - free_start));
      }
      HeapObject* live_object = HeapObject::FromAddress(free_end);
      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
      Map* map = live_object->map();
      int size = live_object->SizeFromMap(map);
      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
        live_object->IterateBody(map->instance_type(), size, v);
      }
      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
        int new_region_start =
            SkipList::RegionNumber(free_end);
        int new_region_end =
            SkipList::RegionNumber(free_end + size - kPointerSize);
        if (new_region_start != curr_region ||
            new_region_end != curr_region) {
          skip_list->AddObject(free_end, size);
          curr_region = new_region_end;
        }
      }
      free_start = free_end + size;
    }
    // Clear marking bits for current cell.
    cells[cell_index] = 0;
  }
  if (free_start != p->area_end()) {
    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
  }
  p->ResetLiveBytes();
  if (FLAG_print_cumulative_gc_stat) {
    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
  }
}
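

// Illustrative sketch (hypothetical types, not part of the collector): the
// free-gap bookkeeping of SweepPrecisely() in isolation. Given the live
// object ranges of a page in address order, everything between the end of
// one object and the start of the next is handed back to the free list.
struct ExampleLiveRange {
  Address start;
  int size;
};

static void SweepGaps(Address area_start, Address area_end,
                      const ExampleLiveRange* live, int count,
                      void (*free_block)(Address start, int size)) {
  Address free_start = area_start;
  for (int i = 0; i < count; i++) {
    if (live[i].start != free_start) {
      // Gap between the previous object (or the page start) and this one.
      free_block(free_start, static_cast<int>(live[i].start - free_start));
    }
    free_start = live[i].start + live[i].size;
  }
  if (free_start != area_end) {
    free_block(free_start, static_cast<int>(area_end - free_start));
  }
}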


static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
  Page* p = Page::FromAddress(code->address());

  if (p->IsEvacuationCandidate() ||
      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
    return false;
  }

  Address code_start = code->address();
  Address code_end = code_start + code->Size();

  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
  uint32_t end_index =
      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);

  Bitmap* b = p->markbits();

  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);

  MarkBit::CellType* start_cell = start_mark_bit.cell();
  MarkBit::CellType* end_cell = end_mark_bit.cell();

  if (value) {
    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;

    if (start_cell == end_cell) {
      *start_cell |= start_mask & end_mask;
    } else {
      *start_cell |= start_mask;
      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
        *cell = ~0;
      }
      *end_cell |= end_mask;
    }
  } else {
    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
      *cell = 0;
    }
  }

  return true;
}
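

// Illustrative sketch (not used by the collector): the start/end mask
// construction used above, pulled out for clarity. For a single-bit mask m,
// ~(m - 1) keeps m and every higher bit, while ((m << 1) - 1) keeps m and
// every lower bit; ANDed together they select a closed bit range inside one
// cell, and used separately they handle ranges spanning several cells.
static uint32_t BitsFromMaskUpward(uint32_t single_bit_mask) {
  return ~(single_bit_mask - 1);
}

static uint32_t BitsFromMaskDownward(uint32_t single_bit_mask) {
  return (single_bit_mask << 1) - 1;
}
// Example: with a start mask of 0x10 and an end mask of 0x40 in the same
// cell, BitsFromMaskUpward(0x10) & BitsFromMaskDownward(0x40) == 0x70,
// i.e. bits 4 through 6 inclusive.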


static bool IsOnInvalidatedCodeObject(Address addr) {
  // We did not record any slots in large objects thus
  // we can safely go to the page from the slot address.
  Page* p = Page::FromAddress(addr);

  // First check owner's identity because old pointer and old data spaces
  // are swept lazily and might still have non-zero mark-bits on some
  // pages.
  if (p->owner()->identity() != CODE_SPACE) return false;

  // In code space only bits on evacuation candidates (but we don't record
  // any slots on them) and under invalidated code objects are non-zero.
  MarkBit mark_bit =
      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));

  return mark_bit.Get();
}


void MarkCompactCollector::InvalidateCode(Code* code) {
  if (heap_->incremental_marking()->IsCompacting() &&
      !ShouldSkipEvacuationSlotRecording(code)) {
    ASSERT(compacting_);

    // If the object is white then no slots were recorded on it yet.
    MarkBit mark_bit = Marking::MarkBitFrom(code);
    if (Marking::IsWhite(mark_bit)) return;

    invalidated_code_.Add(code);
  }
}


bool MarkCompactCollector::MarkInvalidatedCode() {
  bool code_marked = false;

  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];

    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
      code_marked = true;
    }
  }

  return code_marked;
}


void MarkCompactCollector::RemoveDeadInvalidatedCode() {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
  }
}


void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
  int length = invalidated_code_.length();
  for (int i = 0; i < length; i++) {
    Code* code = invalidated_code_[i];
    if (code != NULL) {
      code->Iterate(visitor);
      SetMarkBitsUnderInvalidatedCode(code, false);
    }
  }
  invalidated_code_.Rewind(0);
}


void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();

    EvacuateNewSpace();
  }

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
    EvacuatePages();
  }

  // Second pass: find pointers to new space and update them.
  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    // Update pointers in to space.
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &updating_visitor);
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  heap_->store_buffer(),
                                  &Heap::ScavengeStoreBufferCallback);
    heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",
             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
    }

    if (compacting_ && was_marked_incrementally_) {
      // It's difficult to filter out slots recorded for large objects.
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        // LargeObjectSpace is not swept yet thus we have to skip
        // dead objects explicitly.
        if (!IsMarked(obj)) continue;

        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          obj->Iterate(&updating_visitor);
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
        tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

      if (p->IsEvacuationCandidate()) {
        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                           p->slots_buffer(),
                                           code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }

        // Important: the skip list should be cleared only after roots were
        // updated because root iteration traverses the stack and might have
        // to find code objects from a non-updated pc pointing into an
        // evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

        switch (space->identity()) {
          case OLD_DATA_SPACE:
            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
            break;
          case OLD_POINTER_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          case CODE_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(heap_->cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (cell->IsJSGlobalPropertyCell()) {
      Address value_address =
          reinterpret_cast<Address>(cell) +
          (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
      updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
    }
  }

  // Update the pointer to the head of the native contexts list.
  updating_visitor.VisitPointer(heap_->native_contexts_list_address());

  heap_->string_table()->Iterate(&updating_visitor);

  // Update pointers from external string table.
  heap_->UpdateReferencesInExternalStringTable(
      &UpdateReferenceInExternalStringTableEntry);

  // Update pointers in the new error object list.
  heap_->error_object_list()->UpdateReferences();

  if (!FLAG_watch_ic_patching) {
    // Update JSFunction pointers from the runtime profiler.
    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
        &updating_visitor);
  }

  EvacuationWeakObjectRetainer evacuation_object_retainer;
  heap()->ProcessWeakReferences(&evacuation_object_retainer);

  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
  ProcessInvalidatedCode(&updating_visitor);

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }
#endif

  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  ASSERT(migration_slots_buffer_ == NULL);
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->area_start(), p->area_size());
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ResetLiveBytes();
    space->ReleasePage(p);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
}
3337
3338
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003339static const int kStartTableEntriesPerLine = 5;
3340static const int kStartTableLines = 171;
3341static const int kStartTableInvalidLine = 127;
3342static const int kStartTableUnusedEntry = 126;
fschneider@chromium.org013f3e12010-04-26 13:27:52 +00003343
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003344#define _ kStartTableUnusedEntry
3345#define X kStartTableInvalidLine
3346// Mark-bit to object start offset table.
3347//
3348// The line is indexed by the mark bits in a byte. The first number on
3349// the line describes the number of live object starts for the line and the
3350// other numbers on the line describe the offsets (in words) of the object
3351// starts.
3352//
3353// Since objects are at least 2 words large we don't have entries for two
3354// consecutive 1 bits. All entries after 170 have at least 2 consecutive bits.
3355char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
3356 0, _, _, _, _, // 0
3357 1, 0, _, _, _, // 1
3358 1, 1, _, _, _, // 2
3359 X, _, _, _, _, // 3
3360 1, 2, _, _, _, // 4
3361 2, 0, 2, _, _, // 5
3362 X, _, _, _, _, // 6
3363 X, _, _, _, _, // 7
3364 1, 3, _, _, _, // 8
3365 2, 0, 3, _, _, // 9
3366 2, 1, 3, _, _, // 10
3367 X, _, _, _, _, // 11
3368 X, _, _, _, _, // 12
3369 X, _, _, _, _, // 13
3370 X, _, _, _, _, // 14
3371 X, _, _, _, _, // 15
3372 1, 4, _, _, _, // 16
3373 2, 0, 4, _, _, // 17
3374 2, 1, 4, _, _, // 18
3375 X, _, _, _, _, // 19
3376 2, 2, 4, _, _, // 20
3377 3, 0, 2, 4, _, // 21
  X, _, _, _, _,  // 22
  X, _, _, _, _,  // 23
  X, _, _, _, _,  // 24
  X, _, _, _, _,  // 25
  X, _, _, _, _,  // 26
  X, _, _, _, _,  // 27
  X, _, _, _, _,  // 28
  X, _, _, _, _,  // 29
  X, _, _, _, _,  // 30
  X, _, _, _, _,  // 31
  1, 5, _, _, _,  // 32
  2, 0, 5, _, _,  // 33
  2, 1, 5, _, _,  // 34
  X, _, _, _, _,  // 35
  2, 2, 5, _, _,  // 36
  3, 0, 2, 5, _,  // 37
  X, _, _, _, _,  // 38
  X, _, _, _, _,  // 39
  2, 3, 5, _, _,  // 40
  3, 0, 3, 5, _,  // 41
  3, 1, 3, 5, _,  // 42
  X, _, _, _, _,  // 43
  X, _, _, _, _,  // 44
  X, _, _, _, _,  // 45
  X, _, _, _, _,  // 46
  X, _, _, _, _,  // 47
  X, _, _, _, _,  // 48
  X, _, _, _, _,  // 49
  X, _, _, _, _,  // 50
  X, _, _, _, _,  // 51
  X, _, _, _, _,  // 52
  X, _, _, _, _,  // 53
  X, _, _, _, _,  // 54
  X, _, _, _, _,  // 55
  X, _, _, _, _,  // 56
  X, _, _, _, _,  // 57
  X, _, _, _, _,  // 58
  X, _, _, _, _,  // 59
  X, _, _, _, _,  // 60
  X, _, _, _, _,  // 61
  X, _, _, _, _,  // 62
  X, _, _, _, _,  // 63
  1, 6, _, _, _,  // 64
  2, 0, 6, _, _,  // 65
  2, 1, 6, _, _,  // 66
  X, _, _, _, _,  // 67
  2, 2, 6, _, _,  // 68
  3, 0, 2, 6, _,  // 69
  X, _, _, _, _,  // 70
  X, _, _, _, _,  // 71
  2, 3, 6, _, _,  // 72
  3, 0, 3, 6, _,  // 73
  3, 1, 3, 6, _,  // 74
  X, _, _, _, _,  // 75
  X, _, _, _, _,  // 76
  X, _, _, _, _,  // 77
  X, _, _, _, _,  // 78
  X, _, _, _, _,  // 79
  2, 4, 6, _, _,  // 80
  3, 0, 4, 6, _,  // 81
  3, 1, 4, 6, _,  // 82
  X, _, _, _, _,  // 83
  3, 2, 4, 6, _,  // 84
  4, 0, 2, 4, 6,  // 85
  X, _, _, _, _,  // 86
  X, _, _, _, _,  // 87
  X, _, _, _, _,  // 88
  X, _, _, _, _,  // 89
  X, _, _, _, _,  // 90
  X, _, _, _, _,  // 91
  X, _, _, _, _,  // 92
  X, _, _, _, _,  // 93
  X, _, _, _, _,  // 94
  X, _, _, _, _,  // 95
  X, _, _, _, _,  // 96
  X, _, _, _, _,  // 97
  X, _, _, _, _,  // 98
  X, _, _, _, _,  // 99
  X, _, _, _, _,  // 100
  X, _, _, _, _,  // 101
  X, _, _, _, _,  // 102
  X, _, _, _, _,  // 103
  X, _, _, _, _,  // 104
  X, _, _, _, _,  // 105
  X, _, _, _, _,  // 106
  X, _, _, _, _,  // 107
  X, _, _, _, _,  // 108
  X, _, _, _, _,  // 109
  X, _, _, _, _,  // 110
  X, _, _, _, _,  // 111
  X, _, _, _, _,  // 112
  X, _, _, _, _,  // 113
  X, _, _, _, _,  // 114
  X, _, _, _, _,  // 115
  X, _, _, _, _,  // 116
  X, _, _, _, _,  // 117
  X, _, _, _, _,  // 118
  X, _, _, _, _,  // 119
  X, _, _, _, _,  // 120
  X, _, _, _, _,  // 121
  X, _, _, _, _,  // 122
  X, _, _, _, _,  // 123
  X, _, _, _, _,  // 124
  X, _, _, _, _,  // 125
  X, _, _, _, _,  // 126
  X, _, _, _, _,  // 127
  1, 7, _, _, _,  // 128
  2, 0, 7, _, _,  // 129
  2, 1, 7, _, _,  // 130
  X, _, _, _, _,  // 131
  2, 2, 7, _, _,  // 132
  3, 0, 2, 7, _,  // 133
  X, _, _, _, _,  // 134
  X, _, _, _, _,  // 135
  2, 3, 7, _, _,  // 136
  3, 0, 3, 7, _,  // 137
  3, 1, 3, 7, _,  // 138
  X, _, _, _, _,  // 139
  X, _, _, _, _,  // 140
  X, _, _, _, _,  // 141
  X, _, _, _, _,  // 142
  X, _, _, _, _,  // 143
  2, 4, 7, _, _,  // 144
  3, 0, 4, 7, _,  // 145
  3, 1, 4, 7, _,  // 146
  X, _, _, _, _,  // 147
  3, 2, 4, 7, _,  // 148
  4, 0, 2, 4, 7,  // 149
  X, _, _, _, _,  // 150
  X, _, _, _, _,  // 151
  X, _, _, _, _,  // 152
  X, _, _, _, _,  // 153
  X, _, _, _, _,  // 154
  X, _, _, _, _,  // 155
  X, _, _, _, _,  // 156
  X, _, _, _, _,  // 157
  X, _, _, _, _,  // 158
  X, _, _, _, _,  // 159
  2, 5, 7, _, _,  // 160
  3, 0, 5, 7, _,  // 161
  3, 1, 5, 7, _,  // 162
  X, _, _, _, _,  // 163
  3, 2, 5, 7, _,  // 164
  4, 0, 2, 5, 7,  // 165
  X, _, _, _, _,  // 166
  X, _, _, _, _,  // 167
  3, 3, 5, 7, _,  // 168
  4, 0, 3, 5, 7,  // 169
  4, 1, 3, 5, 7   // 170
};
#undef _
#undef X
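
// Reading the table above: each row describes one possible byte of mark
// bits.  Entry 0 is the number of object starts in that byte, the following
// entries are their bit offsets, and X marks the byte patterns with
// consecutive 1 bits, which can never occur.  Row 37 (0b100101), for
// example, reads {3, 0, 2, 5}: three objects starting at offsets 0, 2 and 5.
// The last valid row is 170 (0b10101010), the largest byte value with no two
// adjacent bits set.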


// Takes a word of mark bits.  Returns the number of objects that start in
// the range.  Puts the offsets (in words) of the object starts into the
// supplied array.
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }
  return objects;
}
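
// For instance (an illustrative sketch, not collector code): mark_bits ==
// 0x29 (0b101001) selects row 41 of kStartTable, {3, 0, 3, 5}, so
//
//   int starts[16];
//   int n = MarkWordToObjectStarts(0x29, starts);
//
// leaves n == 3 and starts[0..2] == {0, 3, 5}.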


static inline Address DigestFreeStart(Address approximate_free_start,
                                      uint32_t free_start_cell) {
  ASSERT(free_start_cell != 0);

  // No consecutive 1 bits.
  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);

  int offsets[16];
  uint32_t cell = free_start_cell;
  int offset_of_last_live;
  if ((cell & 0x80000000u) != 0) {
    // This case would overflow below.
    offset_of_last_live = 31;
  } else {
    // Remove all but one bit, the most significant.  This is an optimization
    // that may or may not be worthwhile.
    cell |= cell >> 16;
    cell |= cell >> 8;
    cell |= cell >> 4;
    cell |= cell >> 2;
    cell |= cell >> 1;
    cell = (cell + 1) >> 1;
    int live_objects = MarkWordToObjectStarts(cell, offsets);
    ASSERT(live_objects == 1);
    offset_of_last_live = offsets[live_objects - 1];
  }
  Address last_live_start =
      approximate_free_start + offset_of_last_live * kPointerSize;
  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
  Address free_start = last_live_start + last_live->Size();
  return free_start;
}
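
// A worked example of the bit smearing above (illustrative only): for
// free_start_cell == 0xA (0b1010, live objects at offsets 1 and 3) the
// shift-or cascade yields cell == 0b1111, and (cell + 1) >> 1 == 0b1000,
// leaving only the most significant bit, i.e. the start of the last live
// object in the cell.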


static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}
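
// The expression above isolates the lowest set bit of the cell.  As an
// illustration, with cell == 0b1010: cell ^ (cell - 1) == 0b0011, so
// first_set_bit == (0b0011 + 1) >> 1 == 0b0010, the mark bit of the first
// live object in the block.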


template<MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space,
                     FreeList* free_list,
                     Address start,
                     int size) {
  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
    return space->Free(start, size);
  } else {
    return size - free_list->Free(start, size);
  }
}
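
// Both instantiations report how many bytes were handed back as free space:
// SWEEP_SEQUENTIALLY accounts the region against the space's own free list,
// while SWEEP_IN_PARALLEL fills the caller-supplied private free list, which
// SweepInParallel() below concatenates back onto the shared one.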


// Force instantiation of templatized SweepConservatively method for
// SWEEP_SEQUENTIALLY mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
        PagedSpace*, FreeList*, Page*);


// Force instantiation of templatized SweepConservatively method for
// SWEEP_IN_PARALLEL mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
        PagedSpace*, FreeList*, Page*);


// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
template<MarkCompactCollector::SweepingParallelism mode>
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                                   FreeList* free_list,
                                                   Page* p) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
          free_list != NULL) ||
         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
          free_list == NULL));

  MarkBit::CellType* cells = p->markbits()->cells();
  p->MarkSweptConservatively();

  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  int cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_start())));

  intptr_t freed_bytes = 0;

  // This is the start of the 32 word block that we are currently looking at.
  Address block_address = p->area_start();

  // Skip over all the dead objects at the start of the page and mark them
  // free.
  for (;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    if (cells[cell_index] != 0) break;
  }
  size_t size = block_address - p->area_start();
  if (cell_index == last_cell_index) {
    freed_bytes += Free<mode>(space, free_list, p->area_start(),
                              static_cast<int>(size));
    ASSERT_EQ(0, p->LiveBytes());
    return freed_bytes;
  }
  // Grow the size of the start-of-page free space a little to get up to the
  // first live object.
  Address free_end = StartOfLiveObject(block_address, cells[cell_index]);
  // Free the first free space.
  size = free_end - p->area_start();
  freed_bytes += Free<mode>(space, free_list, p->area_start(),
                            static_cast<int>(size));

  // The start of the current free area is represented in undigested form by
  // the address of the last 32-word section that contained a live object and
  // the marking bitmap for that cell, which describes where the live object
  // started.  Unless we find a large free space in the bitmap we will not
  // digest this pair into a real address.  We start the iteration here at the
  // first word in the marking bitmap that indicates a live object.
  Address free_start = block_address;
  uint32_t free_start_cell = cells[cell_index];

  for ( ;
       cell_index < last_cell_index;
       cell_index++, block_address += 32 * kPointerSize) {
    ASSERT((unsigned)cell_index ==
        Bitmap::IndexToCell(
            Bitmap::CellAlignIndex(
                p->AddressToMarkbitIndex(block_address))));
    uint32_t cell = cells[cell_index];
    if (cell != 0) {
      // We have a live object.  Check approximately whether it is more than 32
      // words since the last live object.
      if (block_address - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (block_address - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space it still looks
          // like we have a large enough free space to be worth bothering with,
          // so now we need to find the start of the first live object at the
          // end of the free space.
          free_end = StartOfLiveObject(block_address, cell);
          freed_bytes += Free<mode>(space, free_list, free_start,
                                    static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = block_address;
      free_start_cell = cell;
      // Clear marking bits for current cell.
      cells[cell_index] = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (block_address - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += Free<mode>(space, free_list, free_start,
                              static_cast<int>(block_address - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}
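
// Note on the "more than 32 words" test above: with kPointerSize == 8 a dead
// region has to span more than 256 bytes (128 bytes on 32-bit targets)
// before it is digested and handed to the free list; smaller gaps are simply
// left behind as ignored free space.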


void MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           FreeList* private_free_list,
                                           FreeList* free_list) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();

    if (p->TryParallelSweeping()) {
      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
      free_list->Concatenate(private_free_list);
    }
  }
}
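
// A sweeper thread would drive this roughly as follows (a sketch only; the
// member names here are illustrative, see sweeper-thread.h for the real
// ones):
//
//   collector_->SweepInParallel(heap_->old_data_space(),
//                               private_free_list_old_data_space_,
//                               free_list_old_data_space_);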


void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE ||
                                      sweeper == PARALLEL_CONSERVATIVE ||
                                      sweeper == CONCURRENT_CONSERVATIVE);
  space->ClearStats();

  PageIterator it(space);

  intptr_t freed_bytes = 0;
  int pages_swept = 0;
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;

  while (it.has_next()) {
    Page* p = it.next();

    ASSERT(p->parallel_sweeping() == 0);
    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsEvacuationCandidate()) {
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      continue;
    }

    // One unused page is kept, all others are released before being swept.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    if (lazy_sweeping_active) {
      if (FLAG_gc_verbose) {
        PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
               reinterpret_cast<intptr_t>(p));
      }
      space->IncreaseUnsweptFreeBytes(p);
      continue;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
        space->SetPagesToSweep(p->next_page());
        lazy_sweeping_active = true;
        break;
      }
      case CONCURRENT_CONSERVATIVE:
      case PARALLEL_CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        p->set_parallel_sweeping(1);
        space->IncreaseUnsweptFreeBytes(p);
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif
  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
  if (sweep_precisely_) how_to_sweep = PRECISE;
  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.

  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
      how_to_sweep == CONCURRENT_CONSERVATIVE) {
    // TODO(hpayer): Fix the race with the concurrent sweeper.
    StartSweeperThreads();
  }

  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
    WaitUntilSweepingCompleted();
  }

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether an unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  if (how_to_sweep != CONCURRENT_CONSERVATIVE) {
    FinalizeSweeping();
  }
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    enable = false;
  }
#endif

  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(heap()->isolate());
  } else {
    if (code_flusher_ == NULL) return;
    code_flusher_->EvictAllCandidates();
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


// TODO(1466): ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects.  We should either re-enable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}


bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}


bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address,
                        SlotType type,
                        Address addr,
                        AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  ASSERT(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}
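
// A typed slot therefore occupies two consecutive buffer entries: the slot
// type cast to an ObjectSlot, followed by the raw address.  IsTypedSlot()
// can tell the two apart because a genuine slot pointer is never numerically
// smaller than NUMBER_OF_SLOT_TYPES.  Illustrative layout after recording a
// code target:
//
//   slots_[i]      == reinterpret_cast<ObjectSlot>(CODE_TARGET_SLOT)
//   slots_[i + 1]  == reinterpret_cast<ObjectSlot>(rinfo->pc())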


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotTypeForRMode(rinfo->rmode()),
                            rinfo->pc(),
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host = heap()->isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      UpdateSlot(&v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}


void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(&v,
                   DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}


} }  // namespace v8::internal