// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "cpu-profiler.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"
#include "sweeper-thread.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";

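// The patterns above encode an object's color in two consecutive bits of a
// page's marking bitmap: white objects are unmarked, grey objects are marked
// but still queued for scanning, and black objects are marked with all their
// fields scanned. The sketch below is illustrative only (the Sketch* names
// are hypothetical, not part of V8) and assumes the first character of each
// pattern is the mark bit and the second is the bit that follows it.
enum SketchColor { SKETCH_WHITE, SKETCH_GREY, SKETCH_BLACK, SKETCH_IMPOSSIBLE };

static inline SketchColor DecodeColorSketch(bool mark_bit, bool next_bit) {
  if (mark_bit) {
    // "10" is black (marked, fully scanned); "11" is grey (marked, still
    // waiting to have its children visited).
    return next_bit ? SKETCH_GREY : SKETCH_BLACK;
  }
  // "00" is white (unmarked); "01" cannot be produced by the collector.
  return next_bit ? SKETCH_IMPOSSIBLE : SKETCH_WHITE;
}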

// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      marking_parity_(ODD_MARKING_PARITY),
      compacting_(false),
      was_marked_incrementally_(false),
      sweeping_pending_(false),
      sequential_sweeping_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_collections_(NULL),
      have_code_to_deoptimize_(false) { }

#ifdef VERIFY_HEAP
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(heap_->mark_compact_collector()->IsMarked(object));
      }
    }
  }

  void VisitEmbeddedPointer(RelocInfo* rinfo) {
    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!Code::IsWeakEmbeddedObject(rinfo->host()->kind(),
                                    rinfo->target_object())) {
      VisitPointer(rinfo->target_object_address());
    }
  }

 private:
  Heap* heap_;
};


static void VerifyMarking(Heap* heap, Address bottom, Address top) {
  VerifyMarkingVisitor visitor(heap);
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. This allows us to use
  // page->area_start() as the start of the range on all pages.
  CHECK_EQ(space->bottom(),
           NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarking(space->heap(), page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(space->heap(), p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->property_cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor(heap);

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->property_cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif  // VERIFY_HEAP


#ifdef DEBUG
class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
 public:
  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (object->IsString()) continue;
        switch (object->map()->instance_type()) {
          case JS_FUNCTION_TYPE:
            CheckContext(JSFunction::cast(object)->context());
            break;
          case JS_GLOBAL_PROXY_TYPE:
            CheckContext(JSGlobalProxy::cast(object)->native_context());
            break;
          case JS_GLOBAL_OBJECT_TYPE:
          case JS_BUILTINS_OBJECT_TYPE:
            CheckContext(GlobalObject::cast(object)->native_context());
            break;
          case JS_ARRAY_TYPE:
          case JS_DATE_TYPE:
          case JS_OBJECT_TYPE:
          case JS_REGEXP_TYPE:
            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
            break;
          case MAP_TYPE:
            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
            break;
          case FIXED_ARRAY_TYPE:
            if (object->IsContext()) {
              CheckContext(object);
            } else {
              FixedArray* array = FixedArray::cast(object);
              int length = array->length();
              // Set the array length to zero to prevent cycles while
              // iterating over array bodies; this is easier than intrusive
              // marking.
              array->set_length(0);
              array->IterateBody(
                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
              array->set_length(length);
            }
            break;
          case CELL_TYPE:
          case JS_PROXY_TYPE:
          case JS_VALUE_TYPE:
          case TYPE_FEEDBACK_INFO_TYPE:
            object->Iterate(this);
            break;
          case DECLARED_ACCESSOR_INFO_TYPE:
          case EXECUTABLE_ACCESSOR_INFO_TYPE:
          case BYTE_ARRAY_TYPE:
          case CALL_HANDLER_INFO_TYPE:
          case CODE_TYPE:
          case FIXED_DOUBLE_ARRAY_TYPE:
          case HEAP_NUMBER_TYPE:
          case INTERCEPTOR_INFO_TYPE:
          case ODDBALL_TYPE:
          case SCRIPT_TYPE:
          case SHARED_FUNCTION_INFO_TYPE:
            break;
          default:
            UNREACHABLE();
        }
      }
    }
  }

 private:
  void CheckContext(Object* context) {
    if (!context->IsContext()) return;
    Context* native_context = Context::cast(context)->native_context();
    if (current_native_context_ == NULL) {
      current_native_context_ = native_context;
    } else {
      CHECK_EQ(current_native_context_, native_context);
    }
  }

  Context* current_native_context_;
};


static void VerifyNativeContextSeparation(Heap* heap) {
  HeapObjectIterator it(heap->code_space());

  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
    VerifyNativeContextSeparationVisitor visitor;
    Code::cast(object)->CodeIterateBody(&visitor);
  }
}
#endif


void MarkCompactCollector::TearDown() {
  AbortCompaction();
}


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

#ifdef ENABLE_GDB_JIT_INTERFACE
    // If the GDBJIT interface is active, disable compaction.
    if (FLAG_gdbjit) return false;
#endif

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space &&
        (mode == NON_INCREMENTAL_COMPACTION ||
         FLAG_incremental_code_compaction)) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
      TraceFragmentation(heap()->property_cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_collections_ == Smi::FromInt(0));

  heap()->allocation_mementos_found_ = 0;

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (FLAG_collect_maps) ClearNonLiveReferences();

  ClearWeakCollections();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!FLAG_collect_maps) ReattachInitialMaps();

#ifdef DEBUG
  if (FLAG_verify_native_context_separation) {
    VerifyNativeContextSeparation(heap_);
  }
#endif

#ifdef VERIFY_HEAP
  if (heap()->weak_embedded_objects_verification_enabled()) {
    VerifyWeakEmbeddedObjectsInOptimizedCode();
  }
  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
    VerifyOmittedMapChecks();
  }
#endif

  Finish();

  if (marking_parity_ == EVEN_MARKING_PARITY) {
    marking_parity_ = ODD_MARKING_PARITY;
  } else {
    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
    marking_parity_ = EVEN_MARKING_PARITY;
  }

  if (FLAG_trace_track_allocation_sites &&
      heap()->allocation_mementos_found_ > 0) {
    PrintF("AllocationMementos found during mark-sweep = %d\n",
           heap()->allocation_mementos_found_);
  }
  tracer_ = NULL;
}


#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}


void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->property_cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    CHECK(Marking::IsWhite(mark_bit));
    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
  }
}


void MarkCompactCollector::VerifyWeakEmbeddedObjectsInOptimizedCode() {
  HeapObjectIterator code_iterator(heap()->code_space());
  for (HeapObject* obj = code_iterator.Next();
       obj != NULL;
       obj = code_iterator.Next()) {
    Code* code = Code::cast(obj);
    if (code->kind() != Code::OPTIMIZED_FUNCTION) continue;
    if (WillBeDeoptimized(code)) continue;
    code->VerifyEmbeddedObjectsDependency();
  }
}


void MarkCompactCollector::VerifyOmittedMapChecks() {
  HeapObjectIterator iterator(heap()->map_space());
  for (HeapObject* obj = iterator.Next();
       obj != NULL;
       obj = iterator.Next()) {
    Map* map = Map::cast(obj);
    map->VerifyOmittedMapChecks();
  }
}
#endif  // VERIFY_HEAP


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
    Page::FromAddress(obj->address())->ResetProgressBar();
    Page::FromAddress(obj->address())->ResetLiveBytes();
  }
}


void MarkCompactCollector::StartSweeperThreads() {
  sweeping_pending_ = true;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    isolate()->sweeper_threads()[i]->StartSweeping();
  }
}


void MarkCompactCollector::WaitUntilSweepingCompleted() {
  ASSERT(sweeping_pending_ == true);
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
  }
  sweeping_pending_ = false;
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE));
  StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE));
  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
}


intptr_t MarkCompactCollector::
    StealMemoryFromSweeperThreads(PagedSpace* space) {
  intptr_t freed_bytes = 0;
  for (int i = 0; i < FLAG_sweeper_threads; i++) {
    freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space);
  }
  space->AddToAccountingStats(freed_bytes);
  space->DecrementUnsweptFreeBytes(freed_bytes);
  return freed_bytes;
}


bool MarkCompactCollector::AreSweeperThreadsActivated() {
  return isolate()->sweeper_threads() != NULL;
}


bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
  return sweeping_pending_;
}


bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case PROPERTY_CELL_SPACE: return "PROPERTY_CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise it returns a positive integer that
// gives an estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If the page was not swept then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  PagedSpace::SizeStats sizes;
  space->ObtainFreeListStatistics(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
           area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
           area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
           area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
           area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}

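// A worked example of the estimate above, using made-up numbers: for a
// non-code page with area_size = 1000000 bytes, sizes.small_size_ = 100000
// and sizes.medium_size_ = 50000, the computation gives
//   ratio = (100000 * 5 + 50000) * 100 / 1000000 = 55,
// which exceeds ratio_threshold = 15, so the page gets a fragmentation
// score of 55 - 15 = 40. The same free bytes held in huge free-list items
// would score zero, since huge_size_ does not enter the formula.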

void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  static const int kMaxMaxEvacuationCandidates = 1000;
  int number_of_pages = space->CountTotalPages();
  int max_evacuation_candidates =
      static_cast<int>(sqrt(number_of_pages / 2.0) + 1);

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
    // If reduction of memory footprint was requested, we are aggressive
    // about choosing pages to free. We expect that half-empty pages
    // are easier to compact so slightly bump the limit.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates += 2;
  }

  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
    // If over-usage is very high (more than a third of the space), we
    // try to free all mostly empty pages. We expect that almost empty
    // pages are even easier to compact so bump the limit even more.
    mode = REDUCE_MEMORY_FOOTPRINT;
    max_evacuation_candidates *= 2;
  }

  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
           "evacuation candidate limit: %d\n",
           static_cast<double>(over_reserved) / MB,
           static_cast<double>(reserved) / MB,
           static_cast<int>(kFreenessThreshold),
           max_evacuation_candidates);
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  max_evacuation_candidates =
      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      unsigned int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= over_reserved) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        PagedSpace::SizeStats sizes;
        space->ObtainFreeListStatistics(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}

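// The candidate loop above keeps at most max_evacuation_candidates pages:
// once the array is full, the least fragmented retained page is replaced
// whenever a more fragmented one shows up. A minimal standalone sketch of
// that selection scheme (hypothetical names, plain ints instead of
// Candidate objects; not part of the collector):
static void KeepTopKSketch(const int* scores, int n, int* top, int k) {
  int count = 0;
  for (int i = 0; i < n; i++) {
    if (scores[i] == 0) continue;   // A zero score means "not fragmented".
    if (count < k) {
      top[count++] = scores[i];     // Array not yet full: just append.
      continue;
    }
    int least = 0;                  // Rescan for the smallest retained score.
    for (int j = 1; j < k; j++) {
      if (top[j] < top[least]) least = j;
    }
    if (top[least] < scores[i]) top[least] = scores[i];
  }
}
// The collector additionally caches the position of the minimum between
// replacements, so the rescan only happens after an eviction invalidates it.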

void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (IsConcurrentSweepingInProgress()) {
    // Instead of waiting we could also abort the sweeper threads here.
    WaitUntilSweepingCompleted();
  }

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle. We did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force lazy re-initialization of it. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
//   before: all objects are in normal state.
//   after: a live object's map pointer is marked as '00'.

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' map pointers are marked indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.

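// A minimal sketch of the overflow-tolerant traversal described above, using
// hypothetical standalone types (SketchNode and all names below are not part
// of V8); the real collector works on the marking deque and per-page mark
// bits rather than explicit node structs.
struct SketchNode {
  bool marked;
  bool overflowed;
  SketchNode** children;
  int child_count;
};

static void ProcessMarkingStackSketch(SketchNode** stack, int* top,
                                      int capacity, bool* overflow) {
  while (*top > 0) {
    SketchNode* node = stack[--(*top)];
    for (int i = 0; i < node->child_count; i++) {
      SketchNode* child = node->children[i];
      if (child->marked) continue;
      child->marked = true;
      if (*top < capacity) {
        stack[(*top)++] = child;   // Normal case: defer scanning the child.
      } else {
        child->overflowed = true;  // Stack is full: remember via a flag.
        *overflow = true;
      }
    }
  }
  // If *overflow was set, the caller rescans the heap for nodes with
  // overflowed == true, clears the flag, pushes them, and runs this loop
  // again until no overflow occurs.
}
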
void CodeFlusher::ProcessJSFunctionCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);
  Object* undefined = isolate_->heap()->undefined_value();

  JSFunction* candidate = jsfunction_candidates_head_;
  JSFunction* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate, undefined);

    SharedFunctionInfo* shared = candidate->shared();

    Code* code = shared->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && shared->is_compiled()) {
        PrintF("[code-flushing clears: ");
        shared->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      shared->set_code(lazy_compile);
      candidate->set_code(lazy_compile);
    } else {
      candidate->set_code(code);
    }

    // We are in the middle of a GC cycle so the write barrier in the code
    // setter did not record the slot update and we have to do that manually.
    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
    isolate_->heap()->mark_compact_collector()->
        RecordCodeEntrySlot(slot, target);

    Object** shared_code_slot =
        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);

    candidate = next_candidate;
  }

  jsfunction_candidates_head_ = NULL;
}


void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  while (candidate != NULL) {
    next_candidate = GetNextCandidate(candidate);
    ClearNextCandidate(candidate);

    Code* code = candidate->code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    if (!code_mark.Get()) {
      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
        PrintF("[code-flushing clears: ");
        candidate->ShortPrint();
        PrintF(" - age: %d]\n", code->GetAge());
      }
      candidate->set_code(lazy_compile);
    }

    Object** code_slot =
        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(code_slot, code_slot, *code_slot);

    candidate = next_candidate;
  }

  shared_function_info_candidates_head_ = NULL;
}


void CodeFlusher::ProcessOptimizedCodeMaps() {
  static const int kEntriesStart = SharedFunctionInfo::kEntriesStart;
  static const int kEntryLength = SharedFunctionInfo::kEntryLength;
  static const int kContextOffset = 0;
  static const int kCodeOffset = 1;
  static const int kLiteralsOffset = 2;
  STATIC_ASSERT(kEntryLength == 3);

  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
  SharedFunctionInfo* next_holder;
  while (holder != NULL) {
    next_holder = GetNextCodeMap(holder);
    ClearNextCodeMap(holder);

    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
    int new_length = kEntriesStart;
    int old_length = code_map->length();
    for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
      Code* code = Code::cast(code_map->get(i + kCodeOffset));
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        continue;
      }

      // Update and record the context slot in the optimized code map.
      Object** context_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kContextOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*context_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(context_slot, context_slot, *context_slot);

      // Update and record the code slot in the optimized code map.
      Object** code_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kCodeOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*code_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(code_slot, code_slot, *code_slot);

      // Update and record the literals slot in the optimized code map.
      Object** literals_slot = HeapObject::RawField(code_map,
          FixedArray::OffsetOfElementAt(new_length));
      code_map->set(new_length++, code_map->get(i + kLiteralsOffset));
      ASSERT(Marking::IsBlack(
          Marking::MarkBitFrom(HeapObject::cast(*literals_slot))));
      isolate_->heap()->mark_compact_collector()->
          RecordSlot(literals_slot, literals_slot, *literals_slot);
    }

    // Trim the optimized code map if entries have been removed.
    if (new_length < old_length) {
      holder->TrimOptimizedCodeMap(old_length - new_length);
    }

    holder = next_holder;
  }

  optimized_code_map_holder_head_ = NULL;
}

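// The loop above compacts the code map in place: each surviving
// (context, code, literals) triple is copied toward the front of the same
// array and the tail is trimmed afterwards. A standalone sketch of that
// pattern with hypothetical names (the real map also keeps a small header
// prefix, mirrored here by header_size):
static int CompactGroupsSketch(int* array, int length, int header_size,
                               int group_size,
                               bool (*is_live)(const int* group)) {
  int new_length = header_size;
  for (int i = header_size; i + group_size <= length; i += group_size) {
    if (!is_live(array + i)) continue;     // Dead groups are dropped whole.
    for (int j = 0; j < group_size; j++) {
      array[new_length++] = array[i + j];  // Safe: new_length never passes i.
    }
  }
  return new_length;  // The caller trims the remaining length - new_length.
}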

void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
  // Make sure previous flushing decisions are revisited.
  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing abandons function-info: ");
    shared_info->ShortPrint();
    PrintF("]\n");
  }

  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
  SharedFunctionInfo* next_candidate;
  if (candidate == shared_info) {
    next_candidate = GetNextCandidate(shared_info);
    shared_function_info_candidates_head_ = next_candidate;
    ClearNextCandidate(shared_info);
  } else {
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      if (next_candidate == shared_info) {
        next_candidate = GetNextCandidate(shared_info);
        SetNextCandidate(candidate, next_candidate);
        ClearNextCandidate(shared_info);
        break;
      }

      candidate = next_candidate;
    }
  }
}

1167
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001168void CodeFlusher::EvictCandidate(JSFunction* function) {
1169 ASSERT(!function->next_function_link()->IsUndefined());
1170 Object* undefined = isolate_->heap()->undefined_value();
1171
mvstanton@chromium.orgc47dff52013-01-23 16:28:41 +00001172 // Make sure previous flushing decisions are revisited.
mstarzinger@chromium.org32280cf2012-12-06 17:32:37 +00001173 isolate_->heap()->incremental_marking()->RecordWrites(function);
mvstanton@chromium.orgc47dff52013-01-23 16:28:41 +00001174 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
mstarzinger@chromium.org32280cf2012-12-06 17:32:37 +00001175
ulan@chromium.org837a67e2013-06-11 15:39:48 +00001176 if (FLAG_trace_code_flushing) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001177 PrintF("[code-flushing abandons closure: ");
1178 function->shared()->ShortPrint();
1179 PrintF("]\n");
ulan@chromium.org837a67e2013-06-11 15:39:48 +00001180 }
1181
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001182 JSFunction* candidate = jsfunction_candidates_head_;
1183 JSFunction* next_candidate;
1184 if (candidate == function) {
1185 next_candidate = GetNextCandidate(function);
1186 jsfunction_candidates_head_ = next_candidate;
1187 ClearNextCandidate(function, undefined);
1188 } else {
1189 while (candidate != NULL) {
1190 next_candidate = GetNextCandidate(candidate);
1191
1192 if (next_candidate == function) {
1193 next_candidate = GetNextCandidate(function);
1194 SetNextCandidate(candidate, next_candidate);
1195 ClearNextCandidate(function, undefined);
yangguo@chromium.org9768bf12013-01-11 14:51:07 +00001196 break;
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001197 }
1198
1199 candidate = next_candidate;
1200 }
1201 }
1202}
1203
1204
jkummerow@chromium.org4e308cf2013-05-17 13:39:16 +00001205void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1206 ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
1207 get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1208
1209 // Make sure previous flushing decisions are revisited.
1210 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1211
ulan@chromium.org837a67e2013-06-11 15:39:48 +00001212 if (FLAG_trace_code_flushing) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001213 PrintF("[code-flushing abandons code-map: ");
1214 code_map_holder->ShortPrint();
1215 PrintF("]\n");
ulan@chromium.org837a67e2013-06-11 15:39:48 +00001216 }
1217
jkummerow@chromium.org4e308cf2013-05-17 13:39:16 +00001218 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1219 SharedFunctionInfo* next_holder;
1220 if (holder == code_map_holder) {
1221 next_holder = GetNextCodeMap(code_map_holder);
1222 optimized_code_map_holder_head_ = next_holder;
1223 ClearNextCodeMap(code_map_holder);
1224 } else {
1225 while (holder != NULL) {
1226 next_holder = GetNextCodeMap(holder);
1227
1228 if (next_holder == code_map_holder) {
1229 next_holder = GetNextCodeMap(code_map_holder);
1230 SetNextCodeMap(holder, next_holder);
1231 ClearNextCodeMap(code_map_holder);
1232 break;
1233 }
1234
1235 holder = next_holder;
1236 }
1237 }
1238}
1239
1240
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001241void CodeFlusher::EvictJSFunctionCandidates() {
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001242 JSFunction* candidate = jsfunction_candidates_head_;
1243 JSFunction* next_candidate;
1244 while (candidate != NULL) {
1245 next_candidate = GetNextCandidate(candidate);
hpayer@chromium.org7c3372b2013-02-13 17:26:04 +00001246 EvictCandidate(candidate);
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001247 candidate = next_candidate;
1248 }
hpayer@chromium.org7c3372b2013-02-13 17:26:04 +00001249 ASSERT(jsfunction_candidates_head_ == NULL);
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001250}
1251
1252
1253void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1254 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1255 SharedFunctionInfo* next_candidate;
1256 while (candidate != NULL) {
1257 next_candidate = GetNextCandidate(candidate);
hpayer@chromium.org7c3372b2013-02-13 17:26:04 +00001258 EvictCandidate(candidate);
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001259 candidate = next_candidate;
1260 }
hpayer@chromium.org7c3372b2013-02-13 17:26:04 +00001261 ASSERT(shared_function_info_candidates_head_ == NULL);
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00001262}
1263
1264
jkummerow@chromium.org4e308cf2013-05-17 13:39:16 +00001265void CodeFlusher::EvictOptimizedCodeMaps() {
1266 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1267 SharedFunctionInfo* next_holder;
1268 while (holder != NULL) {
1269 next_holder = GetNextCodeMap(holder);
1270 EvictOptimizedCodeMap(holder);
1271 holder = next_holder;
1272 }
1273 ASSERT(optimized_code_map_holder_head_ == NULL);
1274}
1275
1276
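// During a scavenge the candidates on this list may still live in from
// space; visiting each such slot lets the scavenger relocate the list
// links to the objects' new addresses (intent inferred from the use of
// VisitPointer below).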
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001277void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1278 Heap* heap = isolate_->heap();
1279
1280 JSFunction** slot = &jsfunction_candidates_head_;
1281 JSFunction* candidate = jsfunction_candidates_head_;
1282 while (candidate != NULL) {
1283 if (heap->InFromSpace(candidate)) {
1284 v->VisitPointer(reinterpret_cast<Object**>(slot));
1285 }
1286 candidate = GetNextCandidate(*slot);
1287 slot = GetNextCandidateSlot(*slot);
1288 }
1289}
1290
1291
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001292MarkCompactCollector::~MarkCompactCollector() {
1293 if (code_flusher_ != NULL) {
1294 delete code_flusher_;
1295 code_flusher_ = NULL;
1296 }
1297}
1298
mads.s.ager31e71382008-08-13 09:32:07 +00001299
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +00001300static inline HeapObject* ShortCircuitConsString(Object** p) {
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001301 // Optimization: If the heap object pointed to by p is a non-internalized
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001302 // cons string whose right substring is HEAP->empty_string, update
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001303 // it in place to its left substring. Return the updated value.
mads.s.ager31e71382008-08-13 09:32:07 +00001304 //
1305 // Here we assume that if we change *p, we replace it with a heap object
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001306 // (i.e., the left substring of a cons string is always a heap object).
mads.s.ager31e71382008-08-13 09:32:07 +00001307 //
1308 // The check performed is:
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001309 // object->IsConsString() && !object->IsInternalizedString() &&
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001310 // (ConsString::cast(object)->second() == HEAP->empty_string())
mads.s.ager31e71382008-08-13 09:32:07 +00001311 // except the maps for the object and its possible substrings might be
1312 // marked.
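  // Example (illustrative): a cons string built by ("foo" + "") has
  // first() == "foo" and second() == the empty string, so *p is updated
  // to point directly at "foo".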
1313 HeapObject* object = HeapObject::cast(*p);
rossberg@chromium.orgb4b2aa62011-10-13 09:49:59 +00001314 if (!FLAG_clever_optimizations) return object;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001315 Map* map = object->map();
1316 InstanceType type = map->instance_type();
kasperl@chromium.orgd1e3e722009-04-14 13:38:25 +00001317 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
mads.s.ager31e71382008-08-13 09:32:07 +00001318
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001319 Object* second = reinterpret_cast<ConsString*>(object)->second();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001320 Heap* heap = map->GetHeap();
1321 if (second != heap->empty_string()) {
kasperl@chromium.org68ac0092009-07-09 06:00:35 +00001322 return object;
1323 }
mads.s.ager31e71382008-08-13 09:32:07 +00001324
1325 // Since we don't have the object's start, it is impossible to update the
ricow@chromium.org30ce4112010-05-31 10:38:25 +00001326 // page dirty marks. Therefore, we only replace the string with its left
1327 // substring when page dirty marks do not change.
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00001328 Object* first = reinterpret_cast<ConsString*>(object)->first();
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001329 if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
mads.s.ager31e71382008-08-13 09:32:07 +00001330
1331 *p = first;
1332 return HeapObject::cast(first);
1333}
1334
1335
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001336class MarkCompactMarkingVisitor
1337 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001338 public:
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001339 static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
1340 Map* map, HeapObject* obj);
1341
1342 static void ObjectStatsCountFixedArray(
1343 FixedArrayBase* fixed_array,
1344 FixedArraySubInstanceType fast_type,
1345 FixedArraySubInstanceType dictionary_type);
1346
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001347 template<MarkCompactMarkingVisitor::VisitorId id>
jkummerow@chromium.org28583c92012-07-16 11:31:55 +00001348 class ObjectStatsTracker {
1349 public:
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001350 static inline void Visit(Map* map, HeapObject* obj);
jkummerow@chromium.org28583c92012-07-16 11:31:55 +00001351 };
1352
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001353 static void Initialize();
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001354
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001355 INLINE(static void VisitPointer(Heap* heap, Object** p)) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001356 MarkObjectByPointer(heap->mark_compact_collector(), p, p);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001357 }
1358
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001359 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001360 // Mark all objects pointed to in [start, end).
1361 const int kMinRangeForMarkingRecursion = 64;
1362 if (end - start >= kMinRangeForMarkingRecursion) {
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001363 if (VisitUnmarkedObjects(heap, start, end)) return;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001364 // We are close to a stack overflow, so just mark the objects.
1365 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001366 MarkCompactCollector* collector = heap->mark_compact_collector();
1367 for (Object** p = start; p < end; p++) {
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001368 MarkObjectByPointer(collector, start, p);
jkummerow@chromium.org7a96c2a2012-10-01 16:24:39 +00001369 }
1370 }
1371
verwaest@chromium.org33e09c82012-10-10 17:07:22 +00001372 // Marks the object black and pushes it on the marking stack.
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001373 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1374 MarkBit mark = Marking::MarkBitFrom(object);
1375 heap->mark_compact_collector()->MarkObject(object, mark);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001376 }
1377
verwaest@chromium.org33e09c82012-10-10 17:07:22 +00001378 // Marks the object black without pushing it on the marking stack.
1379 // Returns true if object needed marking and false otherwise.
1380 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1381 MarkBit mark_bit = Marking::MarkBitFrom(object);
1382 if (!mark_bit.Get()) {
1383 heap->mark_compact_collector()->SetMark(object, mark_bit);
1384 return true;
1385 }
1386 return false;
1387 }
1388
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001389 // Mark object pointed to by p.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001390 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1391 Object** anchor_slot,
1392 Object** p)) {
mads.s.ager31e71382008-08-13 09:32:07 +00001393 if (!(*p)->IsHeapObject()) return;
1394 HeapObject* object = ShortCircuitConsString(p);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001395 collector->RecordSlot(anchor_slot, p, object);
1396 MarkBit mark = Marking::MarkBitFrom(object);
1397 collector->MarkObject(object, mark);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001398 }
1399
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001400
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001401 // Visit an unmarked object.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001402 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1403 HeapObject* obj)) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001404#ifdef DEBUG
jkummerow@chromium.org2c9426b2013-09-05 16:31:13 +00001405 ASSERT(collector->heap()->Contains(obj));
hpayer@chromium.orgc5d49712013-09-11 08:25:48 +00001406 ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001407#endif
1408 Map* map = obj->map();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001409 Heap* heap = obj->GetHeap();
1410 MarkBit mark = Marking::MarkBitFrom(obj);
1411 heap->mark_compact_collector()->SetMark(obj, mark);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001412 // Mark the map pointer and the body.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001413 MarkBit map_mark = Marking::MarkBitFrom(map);
1414 heap->mark_compact_collector()->MarkObject(map, map_mark);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001415 IterateBody(map, obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001416 }
1417
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001418 // Visit all unmarked objects pointed to by [start, end).
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001419 // Returns false if the operation fails (lack of stack space).
ulan@chromium.org2e04b582013-02-21 14:06:02 +00001420 INLINE(static bool VisitUnmarkedObjects(Heap* heap,
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001421 Object** start,
ulan@chromium.org2e04b582013-02-21 14:06:02 +00001422 Object** end)) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001423 // Return false if we are close to the stack limit.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001424 StackLimitCheck check(heap->isolate());
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001425 if (check.HasOverflowed()) return false;
1426
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001427 MarkCompactCollector* collector = heap->mark_compact_collector();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001428 // Visit the unmarked objects.
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001429 for (Object** p = start; p < end; p++) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001430 Object* o = *p;
1431 if (!o->IsHeapObject()) continue;
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001432 collector->RecordSlot(start, p, o);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001433 HeapObject* obj = HeapObject::cast(o);
1434 MarkBit mark = Marking::MarkBitFrom(obj);
1435 if (mark.Get()) continue;
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001436 VisitUnmarkedObject(collector, obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001437 }
1438 return true;
1439 }
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001440
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001441 INLINE(static void BeforeVisitingSharedFunctionInfo(HeapObject* object)) {
1442 SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
1443 shared->BeforeVisitingPointers();
1444 }
1445
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001446 static void VisitWeakCollection(Map* map, HeapObject* object) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001447 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001448 JSWeakCollection* weak_collection =
1449 reinterpret_cast<JSWeakCollection*>(object);
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001450
1451 // Enqueue weak collection in linked list of encountered weak collections.
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001452 if (weak_collection->next() == Smi::FromInt(0)) {
1453 weak_collection->set_next(collector->encountered_weak_collections());
1454 collector->set_encountered_weak_collections(weak_collection);
svenpanne@chromium.orgfb046332012-04-19 12:02:44 +00001455 }
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001456
1457 // Skip visiting the backing hash table containing the mappings.
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001458 int object_size = JSWeakCollection::BodyDescriptor::SizeOf(map, object);
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001459 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001460 map->GetHeap(),
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001461 object,
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001462 JSWeakCollection::BodyDescriptor::kStartOffset,
1463 JSWeakCollection::kTableOffset);
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001464 BodyVisitorBase<MarkCompactMarkingVisitor>::IteratePointers(
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001465 map->GetHeap(),
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001466 object,
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001467 JSWeakCollection::kTableOffset + kPointerSize,
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001468 object_size);
1469
1470 // Mark the backing hash table without pushing it on the marking stack.
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001471 Object* table_object = weak_collection->table();
svenpanne@chromium.orgfb046332012-04-19 12:02:44 +00001472 if (!table_object->IsHashTable()) return;
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00001473 WeakHashTable* table = WeakHashTable::cast(table_object);
svenpanne@chromium.orgfb046332012-04-19 12:02:44 +00001474 Object** table_slot =
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001475 HeapObject::RawField(weak_collection, JSWeakCollection::kTableOffset);
svenpanne@chromium.orgfb046332012-04-19 12:02:44 +00001476 MarkBit table_mark = Marking::MarkBitFrom(table);
1477 collector->RecordSlot(table_slot, table_slot, table);
1478 if (!table_mark.Get()) collector->SetMark(table, table_mark);
1479 // Recording the map slot can be skipped, because maps are not compacted.
mstarzinger@chromium.org1b3afd12011-11-29 14:28:56 +00001480 collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
1481 ASSERT(MarkCompactCollector::IsMarked(table->map()));
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00001482 }
1483
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001484 private:
1485 template<int id>
1486 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001487
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001488 // Code flushing support.
1489
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001490 static const int kRegExpCodeThreshold = 5;
1491
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001492 static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
1493 JSRegExp* re,
1494 bool is_ascii) {
1495 // Make sure that the fixed array is in fact initialized on the RegExp.
1496 // We could potentially trigger a GC when initializing the RegExp.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001497 if (HeapObject::cast(re->data())->map()->instance_type() !=
1498 FIXED_ARRAY_TYPE) return;
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001499
1500 // Make sure this is a RegExp that actually contains code.
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001501 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001502
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001503 Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001504 if (!code->IsSmi() &&
1505 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001506 // Save a copy that can be reinstated if we need the code again.
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001507 re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
yangguo@chromium.org78d1ad42012-02-09 13:53:47 +00001508
1509 // Saving a copy might create a pointer into compaction candidate
1510 // that was not observed by marker. This might happen if JSRegExp data
1511 // was marked through the compilation cache before marker reached JSRegExp
1512 // object.
1513 FixedArray* data = FixedArray::cast(re->data());
1514 Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
1515 heap->mark_compact_collector()->
1516 RecordSlot(slot, slot, code);
1517
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001518 // Set a number in the 0-255 range to guarantee no smi overflow.
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001519 re->SetDataAt(JSRegExp::code_index(is_ascii),
1520 Smi::FromInt(heap->sweep_generation() & 0xff));
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001521 } else if (code->IsSmi()) {
1522 int value = Smi::cast(code)->value();
1523 // The regexp has not been compiled yet or there was a compilation error.
1524 if (value == JSRegExp::kUninitializedValue ||
1525 value == JSRegExp::kCompilationErrorValue) {
1526 return;
1527 }
1528
1529 // Check if we should flush now.
1530 if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00001531 re->SetDataAt(JSRegExp::code_index(is_ascii),
1532 Smi::FromInt(JSRegExp::kUninitializedValue));
1533 re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
1534 Smi::FromInt(JSRegExp::kUninitializedValue));
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001535 }
1536 }
1537 }
1538
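// Worked example of the aging scheme below (illustrative numbers): with
// kRegExpCodeThreshold == 5, compiled regexp code seen at
// sweep_generation 42 is replaced by the smi 42; if the slot still holds
// 42 when sweep_generation reaches 47 (42 == (47 - 5) & 0xff), both the
// code slot and the saved copy are reset to kUninitializedValue.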
1539
1540 // Works by setting the current sweep_generation (as a smi) in the place
1541 // of the code object in the RegExp's data array and keeping a copy
1542 // around that can be reinstated if the RegExp is used again before
1543 // flushing. If the code has not been used for kRegExpCodeThreshold
1544 // mark-sweep GCs, we flush it.
1545 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001546 Heap* heap = map->GetHeap();
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001547 MarkCompactCollector* collector = heap->mark_compact_collector();
1548 if (!collector->is_code_flushing_enabled()) {
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001549 VisitJSRegExp(map, object);
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001550 return;
1551 }
1552 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001553 // Flush code or set age on both ASCII and two-byte code.
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001554 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1555 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1556 // Visit the fields of the RegExp, including the updated FixedArray.
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001557 VisitJSRegExp(map, object);
jkummerow@chromium.orgddda9e82011-07-06 11:27:02 +00001558 }
1559
jkummerow@chromium.org28583c92012-07-16 11:31:55 +00001560 static VisitorDispatchTable<Callback> non_count_table_;
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001561};
1562
1563
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001564void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001565 FixedArrayBase* fixed_array,
1566 FixedArraySubInstanceType fast_type,
1567 FixedArraySubInstanceType dictionary_type) {
1568 Heap* heap = fixed_array->map()->GetHeap();
1569 if (fixed_array->map() != heap->fixed_cow_array_map() &&
1570 fixed_array->map() != heap->fixed_double_array_map() &&
1571 fixed_array != heap->empty_fixed_array()) {
1572 if (fixed_array->IsDictionary()) {
1573 heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1574 dictionary_type,
1575 fixed_array->Size());
1576 } else {
1577 heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1578 fast_type,
1579 fixed_array->Size());
1580 }
1581 }
1582}
1583
1584
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001585void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
1586 MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001587 Heap* heap = map->GetHeap();
1588 int object_size = obj->Size();
1589 heap->RecordObjectStats(map->instance_type(), -1, object_size);
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001590 non_count_table_.GetVisitorById(id)(map, obj);
1591 if (obj->IsJSObject()) {
1592 JSObject* object = JSObject::cast(obj);
1593 ObjectStatsCountFixedArray(object->elements(),
1594 DICTIONARY_ELEMENTS_SUB_TYPE,
1595 FAST_ELEMENTS_SUB_TYPE);
1596 ObjectStatsCountFixedArray(object->properties(),
1597 DICTIONARY_PROPERTIES_SUB_TYPE,
1598 FAST_PROPERTIES_SUB_TYPE);
1599 }
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001600}
1601
1602
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001603template<MarkCompactMarkingVisitor::VisitorId id>
1604void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001605 Map* map, HeapObject* obj) {
1606 ObjectStatsVisitBase(id, map, obj);
1607}
1608
1609
1610template<>
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001611class MarkCompactMarkingVisitor::ObjectStatsTracker<
1612 MarkCompactMarkingVisitor::kVisitMap> {
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001613 public:
1614 static inline void Visit(Map* map, HeapObject* obj) {
1615 Heap* heap = map->GetHeap();
1616 Map* map_obj = Map::cast(obj);
1617 ASSERT(map->instance_type() == MAP_TYPE);
1618 DescriptorArray* array = map_obj->instance_descriptors();
verwaest@chromium.org06ab2ec2012-10-09 17:00:13 +00001619 if (map_obj->owns_descriptors() &&
1620 array != heap->empty_descriptor_array()) {
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001621 int fixed_array_size = array->Size();
1622 heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1623 DESCRIPTOR_ARRAY_SUB_TYPE,
1624 fixed_array_size);
1625 }
1626 if (map_obj->HasTransitionArray()) {
1627 int fixed_array_size = map_obj->transitions()->Size();
1628 heap->RecordObjectStats(FIXED_ARRAY_TYPE,
1629 TRANSITION_ARRAY_SUB_TYPE,
1630 fixed_array_size);
1631 }
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001632 if (map_obj->has_code_cache()) {
1633 CodeCache* cache = CodeCache::cast(map_obj->code_cache());
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001634 heap->RecordObjectStats(
1635 FIXED_ARRAY_TYPE,
1636 MAP_CODE_CACHE_SUB_TYPE,
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00001637 cache->default_cache()->Size());
1638 if (!cache->normal_type_cache()->IsUndefined()) {
1639 heap->RecordObjectStats(
1640 FIXED_ARRAY_TYPE,
1641 MAP_CODE_CACHE_SUB_TYPE,
1642 FixedArray::cast(cache->normal_type_cache())->Size());
1643 }
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001644 }
1645 ObjectStatsVisitBase(kVisitMap, map, obj);
1646 }
1647};
1648
1649
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001650template<>
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001651class MarkCompactMarkingVisitor::ObjectStatsTracker<
1652 MarkCompactMarkingVisitor::kVisitCode> {
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001653 public:
1654 static inline void Visit(Map* map, HeapObject* obj) {
1655 Heap* heap = map->GetHeap();
1656 int object_size = obj->Size();
1657 ASSERT(map->instance_type() == CODE_TYPE);
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001658 heap->RecordObjectStats(CODE_TYPE, Code::cast(obj)->kind(), object_size);
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001659 ObjectStatsVisitBase(kVisitCode, map, obj);
1660 }
1661};
1662
1663
1664template<>
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001665class MarkCompactMarkingVisitor::ObjectStatsTracker<
1666 MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001667 public:
1668 static inline void Visit(Map* map, HeapObject* obj) {
1669 Heap* heap = map->GetHeap();
1670 SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
1671 if (sfi->scope_info() != heap->empty_fixed_array()) {
1672 heap->RecordObjectStats(
1673 FIXED_ARRAY_TYPE,
1674 SCOPE_INFO_SUB_TYPE,
1675 FixedArray::cast(sfi->scope_info())->Size());
1676 }
1677 ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
1678 }
1679};
1680
1681
1682template<>
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001683class MarkCompactMarkingVisitor::ObjectStatsTracker<
1684 MarkCompactMarkingVisitor::kVisitFixedArray> {
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001685 public:
1686 static inline void Visit(Map* map, HeapObject* obj) {
1687 Heap* heap = map->GetHeap();
1688 FixedArray* fixed_array = FixedArray::cast(obj);
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001689 if (fixed_array == heap->string_table()) {
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001690 heap->RecordObjectStats(
1691 FIXED_ARRAY_TYPE,
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001692 STRING_TABLE_SUB_TYPE,
yangguo@chromium.org304cc332012-07-24 07:59:48 +00001693 fixed_array->Size());
1694 }
1695 ObjectStatsVisitBase(kVisitFixedArray, map, obj);
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001696 }
1697};
1698
1699
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001700void MarkCompactMarkingVisitor::Initialize() {
1701 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001702
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001703 table_.Register(kVisitJSRegExp,
1704 &VisitRegExpAndFlushCode);
1705
verwaest@chromium.org753aee42012-07-17 16:15:42 +00001706 if (FLAG_track_gc_object_stats) {
1707 // Copy the visitor table to make call-through possible.
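    // Each registered tracker first records stats for the object and then
    // dispatches through non_count_table_ to the original visitor (see
    // ObjectStatsVisitBase above).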
1708 non_count_table_.CopyFrom(&table_);
1709#define VISITOR_ID_COUNT_FUNCTION(id) \
1710 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
1711 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
1712#undef VISITOR_ID_COUNT_FUNCTION
1713 }
1714}
1715
1716
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001717VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
1718 MarkCompactMarkingVisitor::non_count_table_;
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001719
1720
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001721class CodeMarkingVisitor : public ThreadVisitor {
1722 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001723 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1724 : collector_(collector) {}
1725
vegorov@chromium.org74f333b2011-04-06 11:17:46 +00001726 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00001727 collector_->PrepareThreadForCodeFlushing(isolate, top);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001728 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001729
1730 private:
1731 MarkCompactCollector* collector_;
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001732};
1733
1734
1735class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1736 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001737 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1738 : collector_(collector) {}
1739
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001740 void VisitPointers(Object** start, Object** end) {
1741 for (Object** p = start; p < end; p++) VisitPointer(p);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001742 }
1743
1744 void VisitPointer(Object** slot) {
1745 Object* obj = *slot;
kasperl@chromium.orga5551262010-12-07 12:49:48 +00001746 if (obj->IsSharedFunctionInfo()) {
1747 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001748 MarkBit shared_mark = Marking::MarkBitFrom(shared);
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00001749 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1750 collector_->MarkObject(shared->code(), code_mark);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001751 collector_->MarkObject(shared, shared_mark);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001752 }
1753 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001754
1755 private:
1756 MarkCompactCollector* collector_;
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001757};
1758
1759
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00001760void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1761 ThreadLocalTop* top) {
1762 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1763 // Note: for a frame that has a pending lazy deoptimization,
1764 // StackFrame::unchecked_code will return a non-optimized code object
1765 // for the outermost function, while StackFrame::LookupCode will return
1766 // the actual optimized code object.
1767 StackFrame* frame = it.frame();
1768 Code* code = frame->unchecked_code();
1769 MarkBit code_mark = Marking::MarkBitFrom(code);
1770 MarkObject(code, code_mark);
1771 if (frame->is_optimized()) {
svenpanne@chromium.orgc859c4f2012-10-15 11:51:39 +00001772 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1773 frame->LookupCode());
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00001774 }
1775 }
1776}
1777
1778
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001779void MarkCompactCollector::PrepareForCodeFlushing() {
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001780 // Enable code flushing for non-incremental cycles.
1781 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
1782 EnableCodeFlushing(!was_marked_incrementally_);
mstarzinger@chromium.orgb1016112012-11-02 15:55:00 +00001783 }
1784
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00001785 // If code flushing is disabled, there is no need to prepare for it.
1786 if (!is_code_flushing_enabled()) return;
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001787
ager@chromium.org5b2fbee2010-09-08 06:38:15 +00001788 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1789 // relies on it being marked before any other descriptor array.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001790 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1791 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1792 MarkObject(descriptor_array, descriptor_array_mark);
ager@chromium.org5b2fbee2010-09-08 06:38:15 +00001793
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001794 // Make sure we are not referencing the code from the stack.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001795 ASSERT(this == heap()->mark_compact_collector());
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00001796 PrepareThreadForCodeFlushing(heap()->isolate(),
1797 heap()->isolate()->thread_local_top());
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001798
1799 // Iterate the archived stacks in all threads to check if
1800 // the code is referenced.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001801 CodeMarkingVisitor code_marking_visitor(this);
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001802 heap()->isolate()->thread_manager()->IterateArchivedThreads(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001803 &code_marking_visitor);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001804
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001805 SharedFunctionInfoMarkingVisitor visitor(this);
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001806 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1807 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001808
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001809 ProcessMarkingDeque();
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00001810}
1811
1812
mads.s.ager31e71382008-08-13 09:32:07 +00001813// Visitor class for marking heap roots.
1814class RootMarkingVisitor : public ObjectVisitor {
1815 public:
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001816 explicit RootMarkingVisitor(Heap* heap)
1817 : collector_(heap->mark_compact_collector()) { }
1818
mads.s.ager31e71382008-08-13 09:32:07 +00001819 void VisitPointer(Object** p) {
1820 MarkObjectByPointer(p);
1821 }
1822
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001823 void VisitPointers(Object** start, Object** end) {
1824 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
mads.s.ager31e71382008-08-13 09:32:07 +00001825 }
1826
mads.s.ager31e71382008-08-13 09:32:07 +00001827 private:
mads.s.ager31e71382008-08-13 09:32:07 +00001828 void MarkObjectByPointer(Object** p) {
1829 if (!(*p)->IsHeapObject()) return;
1830
1831 // Replace flat cons strings in place.
1832 HeapObject* object = ShortCircuitConsString(p);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001833 MarkBit mark_bit = Marking::MarkBitFrom(object);
1834 if (mark_bit.Get()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00001835
mads.s.ager31e71382008-08-13 09:32:07 +00001836 Map* map = object->map();
1837 // Mark the object.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001838 collector_->SetMark(object, mark_bit);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00001839
mads.s.ager31e71382008-08-13 09:32:07 +00001840 // Mark the map pointer and body, and push them on the marking stack.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001841 MarkBit map_mark = Marking::MarkBitFrom(map);
1842 collector_->MarkObject(map, map_mark);
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00001843 MarkCompactMarkingVisitor::IterateBody(map, object);
mads.s.ager31e71382008-08-13 09:32:07 +00001844
1845 // Mark all the objects reachable from the map and body. May leave
1846 // overflowed objects in the heap.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001847 collector_->EmptyMarkingDeque();
mads.s.ager31e71382008-08-13 09:32:07 +00001848 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00001849
1850 MarkCompactCollector* collector_;
mads.s.ager31e71382008-08-13 09:32:07 +00001851};
1852
1853
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001854// Helper class for pruning the string table.
1855class StringTableCleaner : public ObjectVisitor {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001856 public:
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001857 explicit StringTableCleaner(Heap* heap)
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001858 : heap_(heap), pointers_removed_(0) { }
kmillikin@chromium.org13bd2942009-12-16 15:36:05 +00001859
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00001860 virtual void VisitPointers(Object** start, Object** end) {
1861 // Visit all HeapObject pointers in [start, end).
1862 for (Object** p = start; p < end; p++) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001863 Object* o = *p;
1864 if (o->IsHeapObject() &&
1865 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00001866 // Check if the internalized string being pruned is external. We need to
1867 // delete the associated external data as this string is going away.
ager@chromium.org6f10e412009-02-13 10:11:16 +00001868
ager@chromium.org6f10e412009-02-13 10:11:16 +00001869 // Since no objects have yet been moved we can safely access the map of
1870 // the object.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001871 if (o->IsExternalString()) {
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001872 heap_->FinalizeExternalString(String::cast(*p));
ager@chromium.org6f10e412009-02-13 10:11:16 +00001873 }
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00001874 // Set the entry to the_hole_value (as deleted).
1875 *p = heap_->the_hole_value();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001876 pointers_removed_++;
1877 }
1878 }
1879 }
1880
1881 int PointersRemoved() {
1882 return pointers_removed_;
1883 }
jkummerow@chromium.orge297f592011-06-08 10:05:15 +00001884
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001885 private:
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00001886 Heap* heap_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001887 int pointers_removed_;
1888};
1889
1890
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00001891// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1892// are retained.
1893class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1894 public:
1895 virtual Object* RetainAs(Object* object) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001896 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00001897 return object;
1898 } else {
1899 return NULL;
1900 }
1901 }
1902};
1903
1904
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001905// Fill the marking stack with overflowed objects returned by the given
1906// iterator. Stop when the marking stack is filled or the end of the space
1907// is reached, whichever comes first.
1908template<class T>
1909static void DiscoverGreyObjectsWithIterator(Heap* heap,
1910 MarkingDeque* marking_deque,
1911 T* it) {
1912 // The caller should ensure that the marking stack is initially not full,
1913 // so that we don't waste effort pointlessly scanning for objects.
1914 ASSERT(!marking_deque->IsFull());
1915
1916 Map* filler_map = heap->one_pointer_filler_map();
1917 for (HeapObject* object = it->Next();
1918 object != NULL;
1919 object = it->Next()) {
1920 MarkBit markbit = Marking::MarkBitFrom(object);
1921 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1922 Marking::GreyToBlack(markbit);
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001923 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001924 marking_deque->PushBlack(object);
1925 if (marking_deque->IsFull()) return;
1926 }
1927 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001928}
1929
1930
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001931static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
mads.s.ager31e71382008-08-13 09:32:07 +00001932
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001933
danno@chromium.orgbee51992013-07-10 14:57:15 +00001934static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
1935 MemoryChunk* p) {
danno@chromium.org2c26cb12012-05-03 09:06:43 +00001936 ASSERT(!marking_deque->IsFull());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001937 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1938 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1939 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1940 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1941
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001942 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1943 Address cell_base = it.CurrentCellBase();
1944 MarkBit::CellType* cell = it.CurrentCell();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001945
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001946 const MarkBit::CellType current_cell = *cell;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001947 if (current_cell == 0) continue;
1948
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001949 MarkBit::CellType grey_objects;
1950 if (it.HasNext()) {
1951 const MarkBit::CellType next_cell = *(cell+1);
1952 grey_objects = current_cell &
1953 ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
1954 } else {
1955 grey_objects = current_cell & (current_cell >> 1);
1956 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001957
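    // Worked example (illustrative): an object's two mark bits occupy
    // offsets i and i+1 of the cell, least significant bit first, so
    // black is the bit pair "10" and grey is "11". For
    // current_cell = 0b1101 (black object at offset 0, grey at offset 2):
    //   current_cell >> 1                   == 0b0110
    //   current_cell & (current_cell >> 1)  == 0b0100  // grey offset only
    // The next_cell term above supplies the partner bit for an object
    // whose bit pair straddles the cell boundary.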
1958 int offset = 0;
1959 while (grey_objects != 0) {
1960 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
1961 grey_objects >>= trailing_zeros;
1962 offset += trailing_zeros;
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001963 MarkBit markbit(cell, 1 << offset, false);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001964 ASSERT(Marking::IsGrey(markbit));
1965 Marking::GreyToBlack(markbit);
1966 Address addr = cell_base + offset * kPointerSize;
1967 HeapObject* object = HeapObject::FromAddress(addr);
ulan@chromium.org2efb9002012-01-19 15:36:35 +00001968 MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00001969 marking_deque->PushBlack(object);
1970 if (marking_deque->IsFull()) return;
1971 offset += 2;
1972 grey_objects >>= 2;
1973 }
1974
1975 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1976 }
1977}
1978
1979
danno@chromium.org169691d2013-07-15 08:01:13 +00001980int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
1981 NewSpace* new_space,
1982 NewSpacePage* p) {
1983 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1984 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
1985 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
1986 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1987
1989 int survivors_size = 0;
1990
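  // At this point all live objects on the page are black ("10"), so every
  // set bit in a cell starts an object's mark-bit pair and its partner
  // bit is clear; the scan below can therefore advance one bit at a time
  // after handling each object.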
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001991 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1992 Address cell_base = it.CurrentCellBase();
1993 MarkBit::CellType* cell = it.CurrentCell();
danno@chromium.org169691d2013-07-15 08:01:13 +00001994
jkummerow@chromium.org10480472013-07-17 08:22:15 +00001995 MarkBit::CellType current_cell = *cell;
danno@chromium.org169691d2013-07-15 08:01:13 +00001996 if (current_cell == 0) continue;
1997
1998 int offset = 0;
1999 while (current_cell != 0) {
2000 int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
2001 current_cell >>= trailing_zeros;
2002 offset += trailing_zeros;
2003 Address address = cell_base + offset * kPointerSize;
2004 HeapObject* object = HeapObject::FromAddress(address);
2005
2006 int size = object->Size();
2007 survivors_size += size;
2008
jkummerow@chromium.orgd8a3a142013-10-03 12:15:05 +00002009 if (FLAG_trace_track_allocation_sites && object->IsJSObject()) {
2010 if (AllocationMemento::FindForJSObject(JSObject::cast(object), true)
2011 != NULL) {
2012 heap()->allocation_mementos_found_++;
2013 }
2014 }
2015
danno@chromium.org169691d2013-07-15 08:01:13 +00002016 offset++;
2017 current_cell >>= 1;
2018 // Aggressively promote young survivors to the old space.
2019 if (TryPromoteObject(object, size)) {
2020 continue;
2021 }
2022
2023 // Promotion failed. Just migrate object to another semispace.
2024 MaybeObject* allocation = new_space->AllocateRaw(size);
2025 if (allocation->IsFailure()) {
2026 if (!new_space->AddFreshPage()) {
2027 // Shouldn't happen. We are sweeping linearly, and to-space
2028 // has the same number of pages as from-space, so there is
2029 // always room.
2030 UNREACHABLE();
2031 }
2032 allocation = new_space->AllocateRaw(size);
2033 ASSERT(!allocation->IsFailure());
2034 }
2035 Object* target = allocation->ToObjectUnchecked();
2036
2037 MigrateObject(HeapObject::cast(target)->address(),
2038 object->address(),
2039 size,
2040 NEW_SPACE);
2041 }
jkummerow@chromium.org10480472013-07-17 08:22:15 +00002042 *cell = 0;  // Clear the mark bits of the cell just scanned.
danno@chromium.org169691d2013-07-15 08:01:13 +00002043 }
2044 return survivors_size;
2045}
2046
2047
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002048static void DiscoverGreyObjectsInSpace(Heap* heap,
2049 MarkingDeque* marking_deque,
2050 PagedSpace* space) {
2051 if (!space->was_swept_conservatively()) {
2052 HeapObjectIterator it(space);
2053 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it);
2054 } else {
2055 PageIterator it(space);
2056 while (it.has_next()) {
2057 Page* p = it.next();
2058 DiscoverGreyObjectsOnPage(marking_deque, p);
2059 if (marking_deque->IsFull()) return;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002060 }
2061 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002062}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002063
2064
danno@chromium.orgbee51992013-07-10 14:57:15 +00002065static void DiscoverGreyObjectsInNewSpace(Heap* heap,
2066 MarkingDeque* marking_deque) {
2067 NewSpace* space = heap->new_space();
2068 NewSpacePageIterator it(space->bottom(), space->top());
2069 while (it.has_next()) {
2070 NewSpacePage* page = it.next();
2071 DiscoverGreyObjectsOnPage(marking_deque, page);
2072 if (marking_deque->IsFull()) return;
2073 }
2074}
2075
2076
ager@chromium.org9085a012009-05-11 19:22:57 +00002077bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002078 Object* o = *p;
2079 if (!o->IsHeapObject()) return false;
2080 HeapObject* heap_object = HeapObject::cast(o);
2081 MarkBit mark = Marking::MarkBitFrom(heap_object);
2082 return !mark.Get();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002083}
2084
2085
mmassi@chromium.org49a44672012-12-04 13:52:03 +00002086bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
2087 Object** p) {
2088 Object* o = *p;
2089 ASSERT(o->IsHeapObject());
2090 HeapObject* heap_object = HeapObject::cast(o);
2091 MarkBit mark = Marking::MarkBitFrom(heap_object);
2092 return !mark.Get();
2093}
2094
2095
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002096void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002097 StringTable* string_table = heap()->string_table();
2098 // Mark the string table itself.
2099 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
2100 SetMark(string_table, string_table_mark);
ager@chromium.org5ec48922009-05-05 07:25:34 +00002101 // Explicitly mark the prefix.
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002102 string_table->IteratePrefix(visitor);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002103 ProcessMarkingDeque();
ager@chromium.org5ec48922009-05-05 07:25:34 +00002104}
2105
2106
2107void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2108 // Mark the heap roots including global variables, stack variables,
2109 // etc., and all objects reachable from them.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002110 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002111
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002112 // Handle the string table specially.
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002113 MarkStringTable(visitor);
mads.s.ager31e71382008-08-13 09:32:07 +00002114
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002115 MarkWeakObjectToCodeTable();
2116
mads.s.ager31e71382008-08-13 09:32:07 +00002117 // There may be overflowed objects in the heap. Visit them now.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002118 while (marking_deque_.overflowed()) {
2119 RefillMarkingDeque();
2120 EmptyMarkingDeque();
mads.s.ager31e71382008-08-13 09:32:07 +00002121 }
kasper.lund7276f142008-07-30 08:49:36 +00002122}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002123
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002124
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002125void MarkCompactCollector::MarkImplicitRefGroups() {
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002126 List<ImplicitRefGroup*>* ref_groups =
svenpanne@chromium.org876cca82013-03-18 14:43:20 +00002127 isolate()->global_handles()->implicit_ref_groups();
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002128
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002129 int last = 0;
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002130 for (int i = 0; i < ref_groups->length(); i++) {
2131 ImplicitRefGroup* entry = ref_groups->at(i);
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002132 ASSERT(entry != NULL);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002133
danno@chromium.orgca29dd82013-04-26 11:59:48 +00002134 if (!IsMarked(*entry->parent)) {
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002135 (*ref_groups)[last++] = entry;
2136 continue;
2137 }
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002138
danno@chromium.orgca29dd82013-04-26 11:59:48 +00002139 Object*** children = entry->children;
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002140 // A parent object is marked, so mark all child heap objects.
danno@chromium.orgca29dd82013-04-26 11:59:48 +00002141 for (size_t j = 0; j < entry->length; ++j) {
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002142 if ((*children[j])->IsHeapObject()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002143 HeapObject* child = HeapObject::cast(*children[j]);
2144 MarkBit mark = Marking::MarkBitFrom(child);
2145 MarkObject(child, mark);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002146 }
2147 }
2148
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002149 // Once the entire group has been marked, dispose it because it's
2150 // not needed anymore.
danno@chromium.orgca29dd82013-04-26 11:59:48 +00002151 delete entry;
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002152 }
karlklose@chromium.org44bc7082011-04-11 12:33:05 +00002153 ref_groups->Rewind(last);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002154}
2155
2156
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002157void MarkCompactCollector::MarkWeakObjectToCodeTable() {
2158 HeapObject* weak_object_to_code_table =
2159 HeapObject::cast(heap()->weak_object_to_code_table());
2160 if (!IsMarked(weak_object_to_code_table)) {
2161 MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
2162 SetMark(weak_object_to_code_table, mark);
2163 }
2164}
2165
2166
mads.s.ager31e71382008-08-13 09:32:07 +00002167// Mark all objects reachable from the objects on the marking stack.
2168// Before: the marking stack contains zero or more heap object pointers.
2169// After: the marking stack is empty, and all objects reachable from the
2170// marking stack have been marked, or are overflowed in the heap.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002171void MarkCompactCollector::EmptyMarkingDeque() {
2172 while (!marking_deque_.IsEmpty()) {
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002173 HeapObject* object = marking_deque_.Pop();
2174 ASSERT(object->IsHeapObject());
2175 ASSERT(heap()->Contains(object));
2176 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
kasper.lund7276f142008-07-30 08:49:36 +00002177
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002178 Map* map = object->map();
2179 MarkBit map_mark = Marking::MarkBitFrom(map);
2180 MarkObject(map, map_mark);
ager@chromium.orgea4f62e2010-08-16 16:28:43 +00002181
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002182 MarkCompactMarkingVisitor::IterateBody(map, object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002183 }
kasper.lund7276f142008-07-30 08:49:36 +00002184}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002185
kasper.lund7276f142008-07-30 08:49:36 +00002186
mads.s.ager31e71382008-08-13 09:32:07 +00002187// Sweep the heap for overflowed objects, clear their overflow bits, and
2188// push them on the marking stack. Stop early if the marking stack fills
2189// before sweeping completes. If sweeping completes, there are no remaining
2190// overflowed objects in the heap, so the overflow flag on the marking
2191// stack is cleared.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002192void MarkCompactCollector::RefillMarkingDeque() {
2193 ASSERT(marking_deque_.overflowed());
mads.s.ager31e71382008-08-13 09:32:07 +00002194
danno@chromium.orgbee51992013-07-10 14:57:15 +00002195 DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002196 if (marking_deque_.IsFull()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00002197
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002198 DiscoverGreyObjectsInSpace(heap(),
2199 &marking_deque_,
2200 heap()->old_pointer_space());
2201 if (marking_deque_.IsFull()) return;
ager@chromium.org9258b6b2008-09-11 09:11:10 +00002202
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002203 DiscoverGreyObjectsInSpace(heap(),
2204 &marking_deque_,
2205 heap()->old_data_space());
2206 if (marking_deque_.IsFull()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00002207
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002208 DiscoverGreyObjectsInSpace(heap(),
2209 &marking_deque_,
2210 heap()->code_space());
2211 if (marking_deque_.IsFull()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00002212
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002213 DiscoverGreyObjectsInSpace(heap(),
2214 &marking_deque_,
2215 heap()->map_space());
2216 if (marking_deque_.IsFull()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00002217
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002218 DiscoverGreyObjectsInSpace(heap(),
2219 &marking_deque_,
2220 heap()->cell_space());
2221 if (marking_deque_.IsFull()) return;
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00002222
danno@chromium.org41728482013-06-12 22:31:22 +00002223 DiscoverGreyObjectsInSpace(heap(),
2224 &marking_deque_,
2225 heap()->property_cell_space());
2226 if (marking_deque_.IsFull()) return;
2227
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002228 LargeObjectIterator lo_it(heap()->lo_space());
2229 DiscoverGreyObjectsWithIterator(heap(),
2230 &marking_deque_,
2231 &lo_it);
2232 if (marking_deque_.IsFull()) return;
mads.s.ager31e71382008-08-13 09:32:07 +00002233
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002234 marking_deque_.ClearOverflowed();
mads.s.ager31e71382008-08-13 09:32:07 +00002235}
2236
2237
2238// Mark all objects reachable (transitively) from objects on the marking
2239// stack. Before: the marking stack contains zero or more heap object
2240// pointers. After: the marking stack is empty and there are no overflowed
2241// objects in the heap.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002242void MarkCompactCollector::ProcessMarkingDeque() {
2243 EmptyMarkingDeque();
2244 while (marking_deque_.overflowed()) {
2245 RefillMarkingDeque();
2246 EmptyMarkingDeque();
mads.s.ager31e71382008-08-13 09:32:07 +00002247 }
2248}
2249
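// A self-contained sketch of the overflow protocol implemented by the two
// functions above (BoundedWorklist, Drain and ScanHeapForGreyObjects are
// hypothetical, not V8 API): a bounded worklist drops entries when full and
// only records that fact; draining then alternates with heap rescans until
// no overflow remains.
//
//   struct BoundedWorklist {
//     std::vector<HeapObject*> items;
//     size_t limit;
//     bool overflowed;
//     void Push(HeapObject* object) {
//       if (items.size() == limit) {
//         overflowed = true;  // object stays grey in the heap
//       } else {
//         items.push_back(object);
//       }
//     }
//   };
//
//   void ProcessToFixpoint(BoundedWorklist* list) {
//     Drain(list);
//     while (list->overflowed) {
//       list->overflowed = false;
//       ScanHeapForGreyObjects(list);  // refill; may overflow again
//       Drain(list);
//     }
//   }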
2250
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002251// Mark all objects reachable (transitively) from objects on the marking
2252// stack including references only considered in the atomic marking pause.
2253void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
kasper.lund7276f142008-07-30 08:49:36 +00002254 bool work_to_do = true;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002255 ASSERT(marking_deque_.IsEmpty());
kasper.lund7276f142008-07-30 08:49:36 +00002256 while (work_to_do) {
svenpanne@chromium.org876cca82013-03-18 14:43:20 +00002257 isolate()->global_handles()->IterateObjectGroups(
mmassi@chromium.org49a44672012-12-04 13:52:03 +00002258 visitor, &IsUnmarkedHeapObjectWithHeap);
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002259 MarkImplicitRefGroups();
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002260 ProcessWeakCollections();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002261 work_to_do = !marking_deque_.IsEmpty();
2262 ProcessMarkingDeque();
kasper.lund7276f142008-07-30 08:49:36 +00002263 }
2264}
2265
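// The loop above is an ephemeron-style fixpoint. A minimal sketch of the
// core rule (IsMarked and MarkTransitively are stand-ins, not V8 API):
// marking a value may make further keys reachable, so iterate until
// nothing changes.
//
//   typedef std::vector<std::pair<HeapObject*, HeapObject*> > WeakTable;
//
//   void MarkEphemeronsToFixpoint(WeakTable* table) {
//     bool progress = true;
//     while (progress) {
//       progress = false;
//       for (size_t i = 0; i < table->size(); i++) {
//         HeapObject* key = (*table)[i].first;
//         HeapObject* value = (*table)[i].second;
//         if (IsMarked(key) && !IsMarked(value)) {
//           MarkTransitively(value);  // may mark keys of other entries
//           progress = true;
//         }
//       }
//     }
//   }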
2266
machenbach@chromium.orgc1789ee2013-07-05 07:09:57 +00002267void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2268 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2269 !it.done(); it.Advance()) {
2270 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2271 return;
2272 }
2273 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2274 Code* code = it.frame()->LookupCode();
2275 if (!code->CanDeoptAt(it.frame()->pc())) {
2276 code->CodeIterateBody(visitor);
2277 }
2278 ProcessMarkingDeque();
2279 return;
2280 }
2281 }
2282}
2283
2284
kasper.lund7276f142008-07-30 08:49:36 +00002285void MarkCompactCollector::MarkLiveObjects() {
erik.corry@gmail.com9dfbea42010-05-21 12:58:28 +00002286 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00002287 // The recursive GC marker detects when it is nearing stack overflow,
2288 // and switches to a different marking system. JS interrupts interfere
2289 // with the C stack limit check.
svenpanne@chromium.org876cca82013-03-18 14:43:20 +00002290 PostponeInterruptsScope postpone(isolate());
ager@chromium.org5f0c45f2010-12-17 08:51:21 +00002291
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002292 bool incremental_marking_overflowed = false;
2293 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2294 if (was_marked_incrementally_) {
2295 // Finalize the incremental marking and check whether we had an overflow.
2296 // Both markers use grey color to mark overflowed objects so
2297 // the non-incremental marker can deal with them as if the overflow
2298 // occurred during normal marking.
2299 // But the incremental marker uses a separate marking deque,
ulan@chromium.org56c14af2012-09-20 12:51:09 +00002300 // so we have to explicitly copy its overflow state.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002301 incremental_marking->Finalize();
2302 incremental_marking_overflowed =
2303 incremental_marking->marking_deque()->overflowed();
2304 incremental_marking->marking_deque()->ClearOverflowed();
2305 } else {
2306 // Abort any pending incremental activities e.g. incremental sweeping.
2307 incremental_marking->Abort();
2308 }
2309
kasper.lund7276f142008-07-30 08:49:36 +00002310#ifdef DEBUG
2311 ASSERT(state_ == PREPARE_GC);
2312 state_ = MARK_LIVE_OBJECTS;
2313#endif
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002314 // The to space contains live objects, and a page in from space is used
2315 // as a marking stack.
2316 Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
2317 Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
2318 if (FLAG_force_marking_deque_overflows) {
2319 marking_deque_end = marking_deque_start + 64 * kPointerSize;
2320 }
2321 marking_deque_.Initialize(marking_deque_start,
2322 marking_deque_end);
2323 ASSERT(!marking_deque_.overflowed());
kasper.lund7276f142008-07-30 08:49:36 +00002324
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002325 if (incremental_marking_overflowed) {
2326 // There are overflowed objects left in the heap after incremental marking.
2327 marking_deque_.SetOverflowed();
2328 }
kasper.lund7276f142008-07-30 08:49:36 +00002329
ricow@chromium.org0b9f8502010-08-18 07:45:01 +00002330 PrepareForCodeFlushing();
2331
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002332 if (was_marked_incrementally_) {
2333 // There is no write barrier on cells so we have to scan them now at the end
2334 // of the incremental marking.
2335 {
2336 HeapObjectIterator cell_iterator(heap()->cell_space());
2337 HeapObject* cell;
2338 while ((cell = cell_iterator.Next()) != NULL) {
danno@chromium.org41728482013-06-12 22:31:22 +00002339 ASSERT(cell->IsCell());
2340 if (IsMarked(cell)) {
2341 int offset = Cell::kValueOffset;
2342 MarkCompactMarkingVisitor::VisitPointer(
2343 heap(),
2344 reinterpret_cast<Object**>(cell->address() + offset));
2345 }
2346 }
2347 }
2348 {
2349 HeapObjectIterator js_global_property_cell_iterator(
2350 heap()->property_cell_space());
2351 HeapObject* cell;
2352 while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002353 ASSERT(cell->IsPropertyCell());
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002354 if (IsMarked(cell)) {
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002355 MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
ricow@chromium.org64e3a4b2011-12-13 08:07:27 +00002356 }
2357 }
2358 }
2359 }
2360
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002361 RootMarkingVisitor root_visitor(heap());
ager@chromium.org5ec48922009-05-05 07:25:34 +00002362 MarkRoots(&root_visitor);
kasper.lund7276f142008-07-30 08:49:36 +00002363
machenbach@chromium.orgc1789ee2013-07-05 07:09:57 +00002364 ProcessTopOptimizedFrame(&root_visitor);
2365
ager@chromium.org9085a012009-05-11 19:22:57 +00002366 // The objects reachable from the roots are marked, yet unreachable
ricow@chromium.orgbadaffc2011-03-17 12:15:27 +00002367 // objects are unmarked. Mark objects reachable due to host
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002368 // application specific logic or through Harmony weak maps.
2369 ProcessEphemeralMarking(&root_visitor);
kasper.lund7276f142008-07-30 08:49:36 +00002370
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002371 // The objects reachable from the roots, weak maps or object groups
2372 // are marked, yet unreachable objects are unmarked. Mark objects
2373 // reachable only from weak global handles.
kasper.lund7276f142008-07-30 08:49:36 +00002374 //
ager@chromium.org9085a012009-05-11 19:22:57 +00002375 // First we identify nonlive weak handles and mark them as pending
2376 // destruction.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002377 heap()->isolate()->global_handles()->IdentifyWeakHandles(
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002378 &IsUnmarkedHeapObject);
ager@chromium.org9085a012009-05-11 19:22:57 +00002379 // Then we mark the objects and process the transitive closure.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002380 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002381 while (marking_deque_.overflowed()) {
2382 RefillMarkingDeque();
2383 EmptyMarkingDeque();
mads.s.ager31e71382008-08-13 09:32:07 +00002384 }
kasper.lund7276f142008-07-30 08:49:36 +00002385
verwaest@chromium.orgd4be0f02013-06-05 13:39:03 +00002386 // Repeat host application specific and Harmony weak maps marking to
2387 // mark unmarked objects reachable from the weak roots.
2388 ProcessEphemeralMarking(&root_visitor);
kasper.lund7276f142008-07-30 08:49:36 +00002389
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002390 AfterMarking();
2391}
2392
2393
2394void MarkCompactCollector::AfterMarking() {
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002395 // Object literal map caches reference strings (cache keys) and maps
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002396 // (cache values). At this point, still-useful maps have already been
2397 // marked. Mark the keys for the alive values before we process the
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002398 // string table.
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002399 ProcessMapCaches();
2400
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002401 // Prune the string table, removing all strings only pointed to by the
2402 // string table. Cannot use string_table() here because the string
kasper.lund7276f142008-07-30 08:49:36 +00002403 // table is marked.
yangguo@chromium.org4a9f6552013-03-04 14:46:33 +00002404 StringTable* string_table = heap()->string_table();
2405 StringTableCleaner v(heap());
2406 string_table->IterateElements(&v);
2407 string_table->ElementsRemoved(v.PointersRemoved());
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002408 heap()->external_string_table_.Iterate(&v);
2409 heap()->external_string_table_.CleanUp();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002410
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00002411 // Process the weak references.
2412 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002413 heap()->ProcessWeakReferences(&mark_compact_object_retainer);
whesse@chromium.org4a5224e2010-10-20 12:37:07 +00002414
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002415 // Remove object groups after marking phase.
kmillikin@chromium.orgc36ce6e2011-04-04 08:25:31 +00002416 heap()->isolate()->global_handles()->RemoveObjectGroups();
2417 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
kasperl@chromium.orga5551262010-12-07 12:49:48 +00002418
2419 // Flush code from collected candidates.
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002420 if (is_code_flushing_enabled()) {
2421 code_flusher_->ProcessCandidates();
verwaest@chromium.orge4ee6de2012-11-06 12:13:00 +00002422 // If the incremental marker does not support code flushing, we need to
2423 // disable it before the incremental marking steps of the next cycle.
2424 if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
2425 EnableCodeFlushing(false);
2426 }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002427 }
ager@chromium.org9ee27ae2011-03-02 13:43:26 +00002428
yangguo@chromium.org78d1ad42012-02-09 13:53:47 +00002429 if (!FLAG_watch_ic_patching) {
2430 // Clean up dead objects from the runtime profiler.
2431 heap()->isolate()->runtime_profiler()->RemoveDeadSamples();
2432 }
jkummerow@chromium.org28583c92012-07-16 11:31:55 +00002433
2434 if (FLAG_track_gc_object_stats) {
2435 heap()->CheckpointObjectStats();
2436 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002437}
2438
2439
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002440void MarkCompactCollector::ProcessMapCaches() {
yangguo@chromium.org46839fb2012-08-28 09:06:19 +00002441 Object* raw_context = heap()->native_contexts_list_;
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002442 while (raw_context != heap()->undefined_value()) {
2443 Context* context = reinterpret_cast<Context*>(raw_context);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002444 if (IsMarked(context)) {
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002445 HeapObject* raw_map_cache =
2446 HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
2447 // A map cache may be reachable from the stack. In this case
2448 // it's already transitively marked and it's too late to clean
2449 // up its parts.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002450 if (!IsMarked(raw_map_cache) &&
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002451 raw_map_cache != heap()->undefined_value()) {
2452 MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
2453 int existing_elements = map_cache->NumberOfElements();
2454 int used_elements = 0;
2455 for (int i = MapCache::kElementsStartIndex;
2456 i < map_cache->length();
2457 i += MapCache::kEntrySize) {
2458 Object* raw_key = map_cache->get(i);
2459 if (raw_key == heap()->undefined_value() ||
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00002460 raw_key == heap()->the_hole_value()) continue;
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002461 STATIC_ASSERT(MapCache::kEntrySize == 2);
2462 Object* raw_map = map_cache->get(i + 1);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002463 if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002464 ++used_elements;
2465 } else {
2466 // Delete useless entries with unmarked maps.
2467 ASSERT(raw_map->IsMap());
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00002468 map_cache->set_the_hole(i);
2469 map_cache->set_the_hole(i + 1);
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002470 }
2471 }
2472 if (used_elements == 0) {
2473 context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
2474 } else {
2475 // Note: we don't actually shrink the cache here to avoid
2476 // extra complexity during GC. We rely on subsequent cache
2477 // usages (EnsureCapacity) to do this.
2478 map_cache->ElementsRemoved(existing_elements - used_elements);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002479 MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
2480 MarkObject(map_cache, map_cache_markbit);
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002481 }
2482 }
2483 }
2484 // Move to next element in the list.
2485 raw_context = context->get(Context::NEXT_CONTEXT_LINK);
2486 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002487 ProcessMarkingDeque();
ricow@chromium.org4f693d62011-07-04 14:01:31 +00002488}
2489
2490
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002491void MarkCompactCollector::ReattachInitialMaps() {
2492 HeapObjectIterator map_iterator(heap()->map_space());
2493 for (HeapObject* obj = map_iterator.Next();
2494 obj != NULL;
2495 obj = map_iterator.Next()) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002496 Map* map = Map::cast(obj);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002497
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002498 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
2499 if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue;
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002500
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002501 if (map->attached_to_shared_function_info()) {
2502 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
2503 }
2504 }
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002505}
2506
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002507
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002508void MarkCompactCollector::ClearNonLiveReferences() {
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002509 // Iterate over the map space, setting map transitions that go from
jkummerow@chromium.org212d9642012-05-11 15:02:09 +00002510 // a marked map to an unmarked map to null transitions. This action
2511 // is carried out only on maps of JSObjects and related subtypes.
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002512 HeapObjectIterator map_iterator(heap()->map_space());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002513 for (HeapObject* obj = map_iterator.Next();
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002514 obj != NULL;
2515 obj = map_iterator.Next()) {
2516 Map* map = Map::cast(obj);
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002517
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002518 if (!map->CanTransition()) continue;
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002519
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002520 MarkBit map_mark = Marking::MarkBitFrom(map);
2521 if (map_mark.Get() && map->attached_to_shared_function_info()) {
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002522 // This map is used for inobject slack tracking and has been detached
2523 // from SharedFunctionInfo during the mark phase.
2524 // Since it survived the GC, reattach it now.
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002525 JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map);
whesse@chromium.org4a1fe7d2010-09-27 12:32:04 +00002526 }
2527
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002528 ClearNonLivePrototypeTransitions(map);
2529 ClearNonLiveMapTransitions(map, map_mark);
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002530
2531 if (map_mark.Get()) {
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002532 ClearNonLiveDependentCode(map->dependent_code());
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002533 } else {
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002534 ClearAndDeoptimizeDependentCode(map->dependent_code());
2535 map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002536 }
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002537 }
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002538
2539 // Iterate over property cell space, removing dependent code that is not
2540 // otherwise kept alive by strong references.
2541 HeapObjectIterator cell_iterator(heap_->property_cell_space());
2542 for (HeapObject* cell = cell_iterator.Next();
2543 cell != NULL;
2544 cell = cell_iterator.Next()) {
2545 if (IsMarked(cell)) {
2546 ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
2547 }
2548 }
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002549
2550 if (heap_->weak_object_to_code_table()->IsHashTable()) {
2551 WeakHashTable* table =
2552 WeakHashTable::cast(heap_->weak_object_to_code_table());
2553 uint32_t capacity = table->Capacity();
2554 for (uint32_t i = 0; i < capacity; i++) {
2555 uint32_t key_index = table->EntryToIndex(i);
2556 Object* key = table->get(key_index);
2557 if (!table->IsKey(key)) continue;
2558 uint32_t value_index = table->EntryToValueIndex(i);
2559 Object* value = table->get(value_index);
2560 if (IsMarked(key)) {
2561 if (!IsMarked(value)) {
2562 HeapObject* obj = HeapObject::cast(value);
2563 MarkBit mark = Marking::MarkBitFrom(obj);
2564 SetMark(obj, mark);
2565 }
2566 ClearNonLiveDependentCode(DependentCode::cast(value));
2567 } else {
2568 ClearAndDeoptimizeDependentCode(DependentCode::cast(value));
2569 table->set(key_index, heap_->the_hole_value());
2570 table->set(value_index, heap_->the_hole_value());
2571 }
2572 }
2573 }
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002574}
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002575
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002576
2577void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
2578 int number_of_transitions = map->NumberOfProtoTransitions();
danno@chromium.org81cac2b2012-07-10 11:28:27 +00002579 FixedArray* prototype_transitions = map->GetPrototypeTransitions();
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002580
2581 int new_number_of_transitions = 0;
2582 const int header = Map::kProtoTransitionHeaderSize;
2583 const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
2584 const int map_offset = header + Map::kProtoTransitionMapOffset;
2585 const int step = Map::kProtoTransitionElementsPerEntry;
2586 for (int i = 0; i < number_of_transitions; i++) {
2587 Object* prototype = prototype_transitions->get(proto_offset + i * step);
2588 Object* cached_map = prototype_transitions->get(map_offset + i * step);
2589 if (IsMarked(prototype) && IsMarked(cached_map)) {
2590 int proto_index = proto_offset + new_number_of_transitions * step;
2591 int map_index = map_offset + new_number_of_transitions * step;
2592 if (new_number_of_transitions != i) {
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002593 prototype_transitions->set(
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002594 proto_index,
2595 prototype,
2596 UPDATE_WRITE_BARRIER);
dslomov@chromium.orgb752d402013-06-18 11:54:54 +00002597 prototype_transitions->set(
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002598 map_index,
2599 cached_map,
2600 SKIP_WRITE_BARRIER);
erik.corry@gmail.com3847bd52011-04-27 10:38:56 +00002601 }
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002602 Object** slot =
2603 HeapObject::RawField(prototype_transitions,
2604 FixedArray::OffsetOfElementAt(proto_index));
2605 RecordSlot(slot, slot, prototype);
2606 new_number_of_transitions++;
2607 }
2608 }
2609
2610 if (new_number_of_transitions != number_of_transitions) {
2611 map->SetNumberOfProtoTransitions(new_number_of_transitions);
2612 }
2613
2614 // Fill slots that became free with undefined value.
2615 for (int i = new_number_of_transitions * step;
2616 i < number_of_transitions * step;
2617 i++) {
mstarzinger@chromium.org1f410f92013-08-29 08:13:16 +00002618 prototype_transitions->set_undefined(header + i);
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002619 }
2620}
2621
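// The function above is an instance of in-place compaction over a flattened
// array of (prototype, map) pairs. A generic sketch in plain C++ (requires
// <vector> and <algorithm>; IsLive is a hypothetical liveness predicate):
//
//   size_t CompactPairs(std::vector<Object*>* a, bool (*IsLive)(Object*)) {
//     size_t kept = 0;
//     for (size_t i = 0; i + 1 < a->size(); i += 2) {
//       if (IsLive((*a)[i]) && IsLive((*a)[i + 1])) {
//         (*a)[kept] = (*a)[i];            // keep the pair, shifted left
//         (*a)[kept + 1] = (*a)[i + 1];
//         kept += 2;
//       }
//     }
//     std::fill(a->begin() + kept, a->end(), static_cast<Object*>(NULL));
//     return kept / 2;                      // surviving pairs
//   }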
2622
2623void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
2624 MarkBit map_mark) {
jkummerow@chromium.org212d9642012-05-11 15:02:09 +00002625 Object* potential_parent = map->GetBackPointer();
2626 if (!potential_parent->IsMap()) return;
2627 Map* parent = Map::cast(potential_parent);
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002628
jkummerow@chromium.org212d9642012-05-11 15:02:09 +00002629 // Follow the back pointer to check whether this is a map transition from
2630 // a live map to a dead path and, if so, clear the parent's transitions.
yangguo@chromium.org659ceec2012-01-26 07:37:54 +00002631 bool current_is_alive = map_mark.Get();
jkummerow@chromium.org212d9642012-05-11 15:02:09 +00002632 bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
2633 if (!current_is_alive && parent_is_alive) {
2634 parent->ClearNonLiveTransitions(heap());
kasperl@chromium.org9bbf9682008-10-30 11:53:07 +00002635 }
2636}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002637
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002638
jkummerow@chromium.org25b0e212013-10-04 15:38:52 +00002639void MarkCompactCollector::ClearAndDeoptimizeDependentCode(
2640 DependentCode* entries) {
rossberg@chromium.org79e79022013-06-03 15:43:46 +00002641 DisallowHeapAllocation no_allocation;
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002642 DependentCode::GroupStartIndexes starts(entries);
2643 int number_of_entries = starts.number_of_entries();
2644 if (number_of_entries == 0) return;
2645 for (int i = 0; i < number_of_entries; i++) {
danno@chromium.org41728482013-06-12 22:31:22 +00002646 // If the entry is compilation info then the map must be alive,
2647 // and ClearAndDeoptimizeDependentCode shouldn't be called.
2648 ASSERT(entries->is_code_at(i));
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002649 Code* code = entries->code_at(i);
jkummerow@chromium.orgfb732b12013-07-26 10:27:09 +00002650
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00002651 if (IsMarked(code) && !code->marked_for_deoptimization()) {
2652 code->set_marked_for_deoptimization(true);
2653 have_code_to_deoptimize_ = true;
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002654 }
danno@chromium.org41728482013-06-12 22:31:22 +00002655 entries->clear_at(i);
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002656 }
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002657}
2658
2659
mstarzinger@chromium.org1510d582013-06-28 14:00:48 +00002660void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
rossberg@chromium.org79e79022013-06-03 15:43:46 +00002661 DisallowHeapAllocation no_allocation;
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002662 DependentCode::GroupStartIndexes starts(entries);
2663 int number_of_entries = starts.number_of_entries();
2664 if (number_of_entries == 0) return;
2665 int new_number_of_entries = 0;
2666 // Go through all groups, remove dead code objects, and compact.
2667 for (int g = 0; g < DependentCode::kGroupCount; g++) {
2668 int group_number_of_entries = 0;
2669 for (int i = starts.at(g); i < starts.at(g + 1); i++) {
danno@chromium.org41728482013-06-12 22:31:22 +00002670 Object* obj = entries->object_at(i);
2671 ASSERT(obj->IsCode() || IsMarked(obj));
2672 if (IsMarked(obj) &&
jkummerow@chromium.orgfb732b12013-07-26 10:27:09 +00002673 (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002674 if (new_number_of_entries + group_number_of_entries != i) {
danno@chromium.org41728482013-06-12 22:31:22 +00002675 entries->set_object_at(
2676 new_number_of_entries + group_number_of_entries, obj);
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002677 }
danno@chromium.org41728482013-06-12 22:31:22 +00002678 Object** slot = entries->slot_at(new_number_of_entries +
2679 group_number_of_entries);
2680 RecordSlot(slot, slot, obj);
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002681 group_number_of_entries++;
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002682 }
2683 }
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002684 entries->set_number_of_entries(
2685 static_cast<DependentCode::DependencyGroup>(g),
2686 group_number_of_entries);
2687 new_number_of_entries += group_number_of_entries;
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002688 }
ulan@chromium.org2e04b582013-02-21 14:06:02 +00002689 for (int i = new_number_of_entries; i < number_of_entries; i++) {
danno@chromium.org41728482013-06-12 22:31:22 +00002690 entries->clear_at(i);
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002691 }
yangguo@chromium.org003650e2013-01-24 16:31:08 +00002692}
2693
2694
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002695void MarkCompactCollector::ProcessWeakCollections() {
2696 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
2697 Object* weak_collection_obj = encountered_weak_collections();
2698 while (weak_collection_obj != Smi::FromInt(0)) {
2699 ASSERT(MarkCompactCollector::IsMarked(
2700 HeapObject::cast(weak_collection_obj)));
2701 JSWeakCollection* weak_collection =
2702 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2703 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
jkummerow@chromium.org28faa982012-04-13 09:58:30 +00002704 Object** anchor = reinterpret_cast<Object**>(table->address());
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002705 for (int i = 0; i < table->Capacity(); i++) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002706 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
jkummerow@chromium.org28faa982012-04-13 09:58:30 +00002707 Object** key_slot =
2708 HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2709 ObjectHashTable::EntryToIndex(i)));
2710 RecordSlot(anchor, key_slot, *key_slot);
2711 Object** value_slot =
2712 HeapObject::RawField(table, FixedArray::OffsetOfElementAt(
2713 ObjectHashTable::EntryToValueIndex(i)));
verwaest@chromium.orgb6d052d2012-07-27 08:03:27 +00002714 MarkCompactMarkingVisitor::MarkObjectByPointer(
2715 this, anchor, value_slot);
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002716 }
2717 }
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002718 weak_collection_obj = weak_collection->next();
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002719 }
2720}
2721
2722
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002723void MarkCompactCollector::ClearWeakCollections() {
2724 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
2725 Object* weak_collection_obj = encountered_weak_collections();
2726 while (weak_collection_obj != Smi::FromInt(0)) {
2727 ASSERT(MarkCompactCollector::IsMarked(
2728 HeapObject::cast(weak_collection_obj)));
2729 JSWeakCollection* weak_collection =
2730 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2731 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002732 for (int i = 0; i < table->Capacity(); i++) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002733 if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
jkummerow@chromium.orgc3b37122011-11-07 10:14:12 +00002734 table->RemoveEntry(i);
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002735 }
2736 }
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002737 weak_collection_obj = weak_collection->next();
2738 weak_collection->set_next(Smi::FromInt(0));
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002739 }
jkummerow@chromium.orgba72ec82013-07-22 09:21:20 +00002740 set_encountered_weak_collections(Smi::FromInt(0));
kmillikin@chromium.org7c2628c2011-08-10 11:27:35 +00002741}
2742
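// Sketch of the clearing rule above in isolation (std::map stands in for
// ObjectHashTable; IsMarked is the collector's liveness predicate): an
// entry survives only if its key survived marking.
//
//   void ClearDeadEntries(std::map<HeapObject*, Object*>* table) {
//     std::map<HeapObject*, Object*>::iterator it = table->begin();
//     while (it != table->end()) {
//       if (!IsMarked(it->first)) {
//         table->erase(it++);  // key is dead: drop the entry
//       } else {
//         ++it;                // value was already kept alive by marking
//       }
//     }
//   }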
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002743
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002744 // We scavenge new space simultaneously with sweeping. This is done in two
2745// passes.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002746//
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002747// The first pass migrates all alive objects from one semispace to another or
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002748 // promotes them to old space. The forwarding address is written directly
2749 // into the first word of the object without any encoding. If the object is
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002750 // dead, we write NULL as the forwarding address.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002751//
2752// The second pass updates pointers to new space in all spaces. It is possible
2753// to encounter pointers to dead new space objects during traversal of pointers
2754 // to new space. We should clear them to avoid encountering them during the
2755 // next pointer iteration. This is an issue if the store buffer overflows and we
2756// have to scan the entire old space, including dead objects, looking for
2757// pointers to new space.
2758void MarkCompactCollector::MigrateObject(Address dst,
2759 Address src,
2760 int size,
2761 AllocationSpace dest) {
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00002762 HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
jkummerow@chromium.orgfb7a7c42013-10-02 11:41:02 +00002763 ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
2764 ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
hpayer@chromium.org83fa61b2013-07-24 09:36:58 +00002765 if (dest == OLD_POINTER_SPACE) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002766 Address src_slot = src;
2767 Address dst_slot = dst;
2768 ASSERT(IsAligned(size, kPointerSize));
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002769
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002770 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2771 Object* value = Memory::Object_at(src_slot);
2772
2773 Memory::Object_at(dst_slot) = value;
2774
2775 if (heap_->InNewSpace(value)) {
2776 heap_->store_buffer()->Mark(dst_slot);
2777 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2778 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2779 &migration_slots_buffer_,
2780 reinterpret_cast<Object**>(dst_slot),
2781 SlotsBuffer::IGNORE_OVERFLOW);
2782 }
2783
2784 src_slot += kPointerSize;
2785 dst_slot += kPointerSize;
2786 }
2787
2788 if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
2789 Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
2790 Address code_entry = Memory::Address_at(code_entry_slot);
2791
2792 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2793 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2794 &migration_slots_buffer_,
2795 SlotsBuffer::CODE_ENTRY_SLOT,
2796 code_entry_slot,
2797 SlotsBuffer::IGNORE_OVERFLOW);
2798 }
2799 }
2800 } else if (dest == CODE_SPACE) {
svenpanne@chromium.org876cca82013-03-18 14:43:20 +00002801 PROFILE(isolate(), CodeMoveEvent(src, dst));
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002802 heap()->MoveBlock(dst, src, size);
2803 SlotsBuffer::AddTo(&slots_buffer_allocator_,
2804 &migration_slots_buffer_,
2805 SlotsBuffer::RELOCATED_CODE_OBJECT,
2806 dst,
2807 SlotsBuffer::IGNORE_OVERFLOW);
2808 Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
2809 } else {
2810 ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
2811 heap()->MoveBlock(dst, src, size);
2812 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002813 Memory::Address_at(src) = dst;
2814}
2815
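// The essence of the migration above, stripped of write-barrier and slot
// recording (a sketch, not the real code path; requires <cstring>):
//
//   void Migrate(Address dst, Address src, int size) {
//     memcpy(dst, src, size);                  // move the payload
//     *reinterpret_cast<Address*>(src) = dst;  // first word now forwards
//   }
//
// Any visitor that later reads the old location finds a forwarding address
// where the map pointer used to be and can redirect its slot.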
2816
2817// Visitor for updating pointers from live objects in old spaces to new space.
2818// It does not expect to encounter pointers to dead objects.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002819class PointersUpdatingVisitor: public ObjectVisitor {
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002820 public:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002821 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002822
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002823 void VisitPointer(Object** p) {
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002824 UpdatePointer(p);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002825 }
2826
danno@chromium.org2ab0c3b2012-10-05 08:50:56 +00002827 void VisitPointers(Object** start, Object** end) {
2828 for (Object** p = start; p < end; p++) UpdatePointer(p);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002829 }
2830
rossberg@chromium.orgb4b2aa62011-10-13 09:49:59 +00002831 void VisitEmbeddedPointer(RelocInfo* rinfo) {
2832 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
2833 Object* target = rinfo->target_object();
rossberg@chromium.org89e18f52012-10-22 13:09:53 +00002834 Object* old_target = target;
rossberg@chromium.orgb4b2aa62011-10-13 09:49:59 +00002835 VisitPointer(&target);
rossberg@chromium.org89e18f52012-10-22 13:09:53 +00002836 // Avoid unnecessary changes that might needlessly flush the instruction
2837 // cache.
2838 if (target != old_target) {
2839 rinfo->set_target_object(target);
2840 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002841 }
2842
2843 void VisitCodeTarget(RelocInfo* rinfo) {
2844 ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
2845 Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
rossberg@chromium.org89e18f52012-10-22 13:09:53 +00002846 Object* old_target = target;
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002847 VisitPointer(&target);
rossberg@chromium.org89e18f52012-10-22 13:09:53 +00002848 if (target != old_target) {
2849 rinfo->set_target_address(Code::cast(target)->instruction_start());
2850 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002851 }
2852
mvstanton@chromium.orge4ac3ef2012-11-12 14:53:34 +00002853 void VisitCodeAgeSequence(RelocInfo* rinfo) {
2854 ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
2855 Object* stub = rinfo->code_age_stub();
2856 ASSERT(stub != NULL);
2857 VisitPointer(&stub);
2858 if (stub != rinfo->code_age_stub()) {
2859 rinfo->set_code_age_stub(Code::cast(stub));
2860 }
2861 }
2862
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002863 void VisitDebugTarget(RelocInfo* rinfo) {
vegorov@chromium.org2356e6f2010-06-09 09:38:56 +00002864 ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
2865 rinfo->IsPatchedReturnSequence()) ||
2866 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
2867 rinfo->IsPatchedDebugBreakSlotSequence()));
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002868 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
2869 VisitPointer(&target);
2870 rinfo->set_call_address(Code::cast(target)->instruction_start());
2871 }
jkummerow@chromium.orge297f592011-06-08 10:05:15 +00002872
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002873 static inline void UpdateSlot(Heap* heap, Object** slot) {
2874 Object* obj = *slot;
2875
2876 if (!obj->IsHeapObject()) return;
2877
2878 HeapObject* heap_obj = HeapObject::cast(obj);
2879
2880 MapWord map_word = heap_obj->map_word();
2881 if (map_word.IsForwardingAddress()) {
2882 ASSERT(heap->InFromSpace(heap_obj) ||
2883 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
2884 HeapObject* target = map_word.ToForwardingAddress();
2885 *slot = target;
2886 ASSERT(!heap->InFromSpace(target) &&
2887 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2888 }
2889 }
2890
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002891 private:
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002892 inline void UpdatePointer(Object** p) {
2893 UpdateSlot(heap_, p);
2894 }
2895
sgjesse@chromium.orgea88ce92011-03-23 11:19:56 +00002896 Heap* heap_;
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002897};
2898
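// A distilled form of the visitor's slot update (a sketch; the helpers
// below are hypothetical stand-ins for the MapWord-based check above):
// updating a slot is one load, one forwarding test, and one store.
//
//   void UpdateOneSlot(Object** slot) {
//     if (!(*slot)->IsHeapObject()) return;    // Smis never move
//     HeapObject* object = HeapObject::cast(*slot);
//     if (HasForwardingAddress(object)) {
//       *slot = ForwardingAddressOf(object);   // point at the new copy
//     }
//   }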
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002899
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002900static void UpdatePointer(HeapObject** p, HeapObject* object) {
2901 ASSERT(*p == object);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002902
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002903 Address old_addr = object->address();
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002904
2905 Address new_addr = Memory::Address_at(old_addr);
2906
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002907 // The new space sweep will overwrite the map word of dead objects
2908 // with NULL. In this case we do not need to transfer this entry to
2909 // the store buffer which we are rebuilding.
2910 if (new_addr != NULL) {
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002911 *p = HeapObject::FromAddress(new_addr);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002912 } else {
2913 // We have to zap this pointer, because the store buffer may overflow later,
2914 // and then we have to scan the entire heap and we don't want to find
2915 // spurious new-space pointers in the old space.
mstarzinger@chromium.org15613d02012-05-23 12:04:37 +00002916 // TODO(mstarzinger): This was changed to a sentinel value to track down
2917 // rare crashes, change it back to Smi::FromInt(0) later.
2918 *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0x0f100d00 >> 1)); // flood
ricow@chromium.org30ce4112010-05-31 10:38:25 +00002919 }
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002920}
2921
2922
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002923static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2924 Object** p) {
2925 MapWord map_word = HeapObject::cast(*p)->map_word();
2926
2927 if (map_word.IsForwardingAddress()) {
2928 return String::cast(map_word.ToForwardingAddress());
2929 }
2930
2931 return String::cast(*p);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002932}
2933
2934
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002935bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
2936 int object_size) {
hpayer@chromium.org83fa61b2013-07-24 09:36:58 +00002937 // TODO(hpayer): Replace that check with an assert.
2938 CHECK(object_size <= Page::kMaxNonCodeHeapObjectSize);
2939
2940 OldSpace* target_space = heap()->TargetSpace(object);
2941
2942 ASSERT(target_space == heap()->old_pointer_space() ||
2943 target_space == heap()->old_data_space());
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002944 Object* result;
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00002945 MaybeObject* maybe_result = target_space->AllocateRaw(
2946 object_size,
2947 PagedSpace::MOVE_OBJECT);
hpayer@chromium.org83fa61b2013-07-24 09:36:58 +00002948 if (maybe_result->ToObject(&result)) {
2949 HeapObject* target = HeapObject::cast(result);
2950 MigrateObject(target->address(),
2951 object->address(),
2952 object_size,
2953 target_space->identity());
2954 heap()->mark_compact_collector()->tracer()->
2955 increment_promoted_objects_size(object_size);
2956 return true;
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002957 }
2958
2959 return false;
2960}
2961
2962
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002963void MarkCompactCollector::EvacuateNewSpace() {
svenpanne@chromium.orgecb9dd62011-12-01 08:22:35 +00002964 // There are soft limits in the allocation code, designed to trigger a
2965 // mark-sweep collection by failing allocations. But since we are already in
2966 // a mark-sweep allocation, there is no sense in trying to trigger one.
2967 AlwaysAllocateScope scope;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002968 heap()->CheckNewSpaceExpansionCriteria();
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002969
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002970 NewSpace* new_space = heap()->new_space();
2971
2972 // Store allocation range before flipping semispaces.
2973 Address from_bottom = new_space->bottom();
2974 Address from_top = new_space->top();
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002975
2976 // Flip the semispaces. After flipping, to space is empty, from space has
2977 // live objects.
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002978 new_space->Flip();
2979 new_space->ResetAllocationInfo();
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002980
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002981 int survivors_size = 0;
2982
2983 // First pass: traverse all objects in inactive semispace, remove marks,
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002984 // migrate live objects and write forwarding addresses. This stage puts
2985 // new entries in the store buffer and may cause some pages to be marked
2986 // scan-on-scavenge.
danno@chromium.org169691d2013-07-15 08:01:13 +00002987 NewSpacePageIterator it(from_bottom, from_top);
2988 while (it.has_next()) {
2989 NewSpacePage* p = it.next();
2990 survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
whesse@chromium.orgb6e43bb2010-04-14 09:36:28 +00002991 }
2992
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00002993 heap_->IncrementYoungSurvivorsCounter(survivors_size);
2994 new_space->set_age_mark(new_space->top());
2995}
2996
2997
2998void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
2999 AlwaysAllocateScope always_allocate;
3000 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3001 ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003002 p->MarkSweptPrecisely();
3003
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003004 int offsets[16];
3005
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003006 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3007 Address cell_base = it.CurrentCellBase();
3008 MarkBit::CellType* cell = it.CurrentCell();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003009
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003010 if (*cell == 0) continue;
3011
3012 int live_objects = MarkWordToObjectStarts(*cell, offsets);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003013 for (int i = 0; i < live_objects; i++) {
3014 Address object_addr = cell_base + offsets[i] * kPointerSize;
3015 HeapObject* object = HeapObject::FromAddress(object_addr);
3016 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
3017
3018 int size = object->Size();
3019
mstarzinger@chromium.orga2e1a402013-10-15 08:25:05 +00003020 MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003021 if (target->IsFailure()) {
3022 // OS refused to give us memory.
3023 V8::FatalProcessOutOfMemory("Evacuation");
3024 return;
3025 }
3026
3027 Object* target_object = target->ToObjectUnchecked();
3028
3029 MigrateObject(HeapObject::cast(target_object)->address(),
3030 object_addr,
3031 size,
3032 space->identity());
3033 ASSERT(object->map_word().IsForwardingAddress());
3034 }
3035
3036 // Clear marking bits for current cell.
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003037 *cell = 0;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003038 }
3039 p->ResetLiveBytes();
3040}
3041
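// MarkWordToObjectStarts, used above, decodes one 32-bit bitmap cell into
// word offsets of live object starts. A simplified, table-free sketch of
// that decoding (the real version is more heavily optimized):
//
//   int BitsToOffsets(uint32_t cell, int offsets[32]) {
//     int count = 0;
//     for (int bit = 0; bit < 32; bit++) {
//       if (cell & (1u << bit)) offsets[count++] = bit;  // bit index = offset
//     }
//     return count;
//   }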
3042
3043void MarkCompactCollector::EvacuatePages() {
3044 int npages = evacuation_candidates_.length();
3045 for (int i = 0; i < npages; i++) {
3046 Page* p = evacuation_candidates_[i];
3047 ASSERT(p->IsEvacuationCandidate() ||
3048 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3049 if (p->IsEvacuationCandidate()) {
3050 // During compaction we might have to request a new page.
3051 // Check that space still have room for that.
3052 if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
3053 EvacuateLiveObjectsFromPage(p);
3054 } else {
3055 // Without room for expansion, evacuation is not guaranteed to succeed.
3056 // Pessimistically abandon unevacuated pages.
3057 for (int j = i; j < npages; j++) {
3058 Page* page = evacuation_candidates_[j];
3059 slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
3060 page->ClearEvacuationCandidate();
3061 page->SetFlag(Page::RESCAN_ON_EVACUATION);
ulan@chromium.org6e196bf2013-03-13 09:38:22 +00003062 page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor());
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003063 }
3064 return;
3065 }
3066 }
3067 }
3068}
3069
3070
3071class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3072 public:
3073 virtual Object* RetainAs(Object* object) {
3074 if (object->IsHeapObject()) {
3075 HeapObject* heap_object = HeapObject::cast(object);
3076 MapWord map_word = heap_object->map_word();
3077 if (map_word.IsForwardingAddress()) {
3078 return map_word.ToForwardingAddress();
3079 }
3080 }
3081 return object;
3082 }
3083};
3084
3085
dslomov@chromium.orge97852d2013-09-12 09:02:59 +00003086static inline void UpdateSlot(Isolate* isolate,
3087 ObjectVisitor* v,
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003088 SlotsBuffer::SlotType slot_type,
3089 Address addr) {
3090 switch (slot_type) {
3091 case SlotsBuffer::CODE_TARGET_SLOT: {
3092 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
dslomov@chromium.orge97852d2013-09-12 09:02:59 +00003093 rinfo.Visit(isolate, v);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003094 break;
3095 }
3096 case SlotsBuffer::CODE_ENTRY_SLOT: {
3097 v->VisitCodeEntry(addr);
3098 break;
3099 }
3100 case SlotsBuffer::RELOCATED_CODE_OBJECT: {
3101 HeapObject* obj = HeapObject::FromAddress(addr);
3102 Code::cast(obj)->CodeIterateBody(v);
3103 break;
3104 }
3105 case SlotsBuffer::DEBUG_TARGET_SLOT: {
3106 RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
dslomov@chromium.orge97852d2013-09-12 09:02:59 +00003107 if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003108 break;
3109 }
3110 case SlotsBuffer::JS_RETURN_SLOT: {
3111 RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
dslomov@chromium.orge97852d2013-09-12 09:02:59 +00003112 if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003113 break;
3114 }
rossberg@chromium.orgb4b2aa62011-10-13 09:49:59 +00003115 case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
3116 RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
dslomov@chromium.orge97852d2013-09-12 09:02:59 +00003117 rinfo.Visit(isolate, v);
rossberg@chromium.orgb4b2aa62011-10-13 09:49:59 +00003118 break;
3119 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003120 default:
3121 UNREACHABLE();
3122 break;
3123 }
3124}
3125
3126
3127enum SweepingMode {
3128 SWEEP_ONLY,
3129 SWEEP_AND_VISIT_LIVE_OBJECTS
3130};
3131
3132
3133enum SkipListRebuildingMode {
3134 REBUILD_SKIP_LIST,
3135 IGNORE_SKIP_LIST
3136};
3137
3138
3139// Sweep a space precisely. After this has been done the space can
3140// be iterated precisely, hitting only the live objects. Code space
3141// is always swept precisely because we want to be able to iterate
3142// over it. Map space is swept precisely, because it is not compacted.
3143// Slots in live objects pointing into evacuation candidates are updated
3144// if requested.
3145template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode>
3146static void SweepPrecisely(PagedSpace* space,
3147 Page* p,
3148 ObjectVisitor* v) {
3149 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
3150 ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3151 space->identity() == CODE_SPACE);
3152 ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3153
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00003154 double start_time = 0.0;
3155 if (FLAG_print_cumulative_gc_stat) {
3156 start_time = OS::TimeCurrentMillis();
3157 }
3158
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003159 p->MarkSweptPrecisely();
3160
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00003161 Address free_start = p->area_start();
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003162 ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003163 int offsets[16];
3164
3165 SkipList* skip_list = p->skip_list();
3166 int curr_region = -1;
3167 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3168 skip_list->Clear();
3169 }
3170
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003171 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3172 Address cell_base = it.CurrentCellBase();
3173 MarkBit::CellType* cell = it.CurrentCell();
3174 int live_objects = MarkWordToObjectStarts(*cell, offsets);
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003175 int live_index = 0;
3176 for ( ; live_objects != 0; live_objects--) {
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003177 Address free_end = cell_base + offsets[live_index++] * kPointerSize;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003178 if (free_end != free_start) {
3179 space->Free(free_start, static_cast<int>(free_end - free_start));
machenbach@chromium.orgc1789ee2013-07-05 07:09:57 +00003180#ifdef ENABLE_GDB_JIT_INTERFACE
3181 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3182 GDBJITInterface::RemoveCodeRange(free_start, free_end);
3183 }
3184#endif
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003185 }
3186 HeapObject* live_object = HeapObject::FromAddress(free_end);
3187 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
3188 Map* map = live_object->map();
3189 int size = live_object->SizeFromMap(map);
3190 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3191 live_object->IterateBody(map->instance_type(), size, v);
3192 }
3193 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3194 int new_region_start =
3195 SkipList::RegionNumber(free_end);
3196 int new_region_end =
3197 SkipList::RegionNumber(free_end + size - kPointerSize);
3198 if (new_region_start != curr_region ||
3199 new_region_end != curr_region) {
3200 skip_list->AddObject(free_end, size);
3201 curr_region = new_region_end;
3202 }
3203 }
3204 free_start = free_end + size;
3205 }
3206 // Clear marking bits for current cell.
jkummerow@chromium.org10480472013-07-17 08:22:15 +00003207 *cell = 0;
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003208 }
yangguo@chromium.orgab30bb82012-02-24 14:41:46 +00003209 if (free_start != p->area_end()) {
3210 space->Free(free_start, static_cast<int>(p->area_end() - free_start));
machenbach@chromium.orgc1789ee2013-07-05 07:09:57 +00003211#ifdef ENABLE_GDB_JIT_INTERFACE
3212 if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
3213 GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
3214 }
3215#endif
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003216 }
3217 p->ResetLiveBytes();
mstarzinger@chromium.orge3b8d0f2013-02-01 09:06:41 +00003218 if (FLAG_print_cumulative_gc_stat) {
3219 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
3220 }
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003221}
3222
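// At its core the precise sweep above is a gap-freeing scan. A sketch over
// a presorted list of live (address, size) pairs (hypothetical Free
// callback; requires <vector> and <utility>):
//
//   void SweepGaps(Address area_start, Address area_end,
//                  const std::vector<std::pair<Address, int> >& live,
//                  void (*Free)(Address, int)) {
//     Address free_start = area_start;
//     for (size_t i = 0; i < live.size(); i++) {
//       Address object_start = live[i].first;
//       if (object_start != free_start) {
//         Free(free_start, static_cast<int>(object_start - free_start));
//       }
//       free_start = object_start + live[i].second;  // skip the live object
//     }
//     if (free_start != area_end) {
//       Free(free_start, static_cast<int>(area_end - free_start));
//     }
//   }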
3223
3224static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
3225 Page* p = Page::FromAddress(code->address());
3226
3227 if (p->IsEvacuationCandidate() ||
3228 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3229 return false;
3230 }
3231
3232 Address code_start = code->address();
3233 Address code_end = code_start + code->Size();
3234
3235 uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
3236 uint32_t end_index =
3237 MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
3238
3239 Bitmap* b = p->markbits();
3240
3241 MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
3242 MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
3243
3244 MarkBit::CellType* start_cell = start_mark_bit.cell();
3245 MarkBit::CellType* end_cell = end_mark_bit.cell();
3246
3247 if (value) {
3248 MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
3249 MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
3250
3251 if (start_cell == end_cell) {
3252 *start_cell |= start_mask & end_mask;
3253 } else {
3254 *start_cell |= start_mask;
3255 for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
3256 *cell = ~0;
3257 }
3258 *end_cell |= end_mask;
3259 }
3260 } else {
3261 for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
3262 *cell = 0;
3263 }
3264 }
3265
3266 return true;
3267}
3268
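// Setting a contiguous range of mark bits that may span several 32-bit
// cells, as done above, only ever needs partial masks for the first and
// last cell. A standalone sketch (first and last are inclusive bit indices):
//
//   void SetBitRange(uint32_t* bits, int first, int last) {
//     uint32_t* start_cell = bits + first / 32;
//     uint32_t* end_cell = bits + last / 32;
//     uint32_t start_mask = ~0u << (first % 32);
//     uint32_t end_mask = ~0u >> (31 - last % 32);
//     if (start_cell == end_cell) {
//       *start_cell |= start_mask & end_mask;   // range within one cell
//     } else {
//       *start_cell |= start_mask;
//       for (uint32_t* cell = start_cell + 1; cell < end_cell; cell++) {
//         *cell = ~0u;                          // whole cells in between
//       }
//       *end_cell |= end_mask;
//     }
//   }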
3269
3270static bool IsOnInvalidatedCodeObject(Address addr) {
3271 // We did not record any slots in large objects, thus
3272 // we can safely go to the page from the slot address.
3273 Page* p = Page::FromAddress(addr);
3274
3275 // First check owner's identity because old pointer and old data spaces
3276 // are swept lazily and might still have non-zero mark-bits on some
3277 // pages.
3278 if (p->owner()->identity() != CODE_SPACE) return false;
3279
3280 // In code space, only the mark bits on evacuation candidates (on which we
3281 // record no slots) and under invalidated code objects are non-zero.
3282 MarkBit mark_bit =
3283 p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
3284
3285 return mark_bit.Get();
3286}
3287
3288
3289void MarkCompactCollector::InvalidateCode(Code* code) {
3290 if (heap_->incremental_marking()->IsCompacting() &&
3291 !ShouldSkipEvacuationSlotRecording(code)) {
3292 ASSERT(compacting_);
3293
3294 // If the object is white, then no slots were recorded on it yet.
3295 MarkBit mark_bit = Marking::MarkBitFrom(code);
3296 if (Marking::IsWhite(mark_bit)) return;
3297
3298 invalidated_code_.Add(code);
3299 }
3300}
3301
3302
jkummerow@chromium.orgfb732b12013-07-26 10:27:09 +00003303// Return true if the given code is deoptimized or will be deoptimized.
3304bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
jkummerow@chromium.org3d00d0a2013-09-04 13:57:32 +00003305 return code->marked_for_deoptimization();
jkummerow@chromium.orgfb732b12013-07-26 10:27:09 +00003306}
3307
3308
erik.corry@gmail.comc3b670f2011-10-05 21:44:48 +00003309bool MarkCompactCollector::MarkInvalidatedCode() {
3310 bool code_marked = false;
3311
3312 int length = invalidated_code_.length();
3313 for (int i = 0; i < length; i++) {
3314 Code* code = invalidated_code_[i];
3315
3316 if (SetMarkBitsUnderInvalidatedCode(code, true)) {
3317 code_marked = true;
3318 }
3319 }
3320
3321 return code_marked;
3322}
3323
3324
3325void MarkCompactCollector::RemoveDeadInvalidatedCode() {
3326 int length = invalidated_code_.length();
3327 for (int i = 0; i < length; i++) {
3328 if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
3329 }
3330}
3331
3332
3333void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
3334 int length = invalidated_code_.length();
3335 for (int i = 0; i < length; i++) {
3336 Code* code = invalidated_code_[i];
3337 if (code != NULL) {
3338 code->Iterate(visitor);
3339 SetMarkBitsUnderInvalidatedCode(code, false);
3340 }
3341 }
3342 invalidated_code_.Rewind(0);
3343}
3344
3345
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
  Heap::RelocationLock relocation_lock(heap());

  bool code_slots_filtering_required;
  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
    code_slots_filtering_required = MarkInvalidatedCode();
    EvacuateNewSpace();
  }

  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
    EvacuatePages();
  }

  // Second pass: find pointers to new space and update them.
  PointersUpdatingVisitor updating_visitor(heap());

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
    // Update pointers in to space.
    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
                            heap()->new_space()->top());
    for (HeapObject* object = to_it.Next();
         object != NULL;
         object = to_it.Next()) {
      Map* map = object->map();
      object->IterateBody(map->instance_type(),
                          object->SizeFromMap(map),
                          &updating_visitor);
    }
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
    // Update roots.
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
    StoreBufferRebuildScope scope(heap_,
                                  heap_->store_buffer(),
                                  &Heap::ScavengeStoreBufferCallback);
    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
        &UpdatePointer);
  }

  { GCTracer::Scope gc_scope(tracer_,
                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                       migration_slots_buffer_,
                                       code_slots_filtering_required);
    if (FLAG_trace_fragmentation) {
      PrintF("  migration slots buffer: %d\n",
             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
    }

    if (compacting_ && was_marked_incrementally_) {
      // It's difficult to filter out slots recorded for large objects.
      LargeObjectIterator it(heap_->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        // LargeObjectSpace is not swept yet, thus we have to skip dead
        // objects explicitly.
        if (!IsMarked(obj)) continue;

        Page* p = Page::FromAddress(obj->address());
        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
          obj->Iterate(&updating_visitor);
          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
        }
      }
    }
  }

  int npages = evacuation_candidates_.length();
  { GCTracer::Scope gc_scope(
      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      ASSERT(p->IsEvacuationCandidate() ||
             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

      if (p->IsEvacuationCandidate()) {
        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
                                           p->slots_buffer(),
                                           code_slots_filtering_required);
        if (FLAG_trace_fragmentation) {
          PrintF("  page %p slots buffer: %d\n",
                 reinterpret_cast<void*>(p),
                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
        }

        // Important: the skip list should be cleared only after roots were
        // updated, because root iteration traverses the stack and might have
        // to find code objects from a non-updated pc pointing into an
        // evacuation candidate.
        SkipList* list = p->skip_list();
        if (list != NULL) list->Clear();
      } else {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

        switch (space->identity()) {
          case OLD_DATA_SPACE:
            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
            break;
          case OLD_POINTER_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          case CODE_SPACE:
            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>(
                space, p, &updating_visitor);
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }

  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

  // Update pointers from cells.
  HeapObjectIterator cell_iterator(heap_->cell_space());
  for (HeapObject* cell = cell_iterator.Next();
       cell != NULL;
       cell = cell_iterator.Next()) {
    if (cell->IsCell()) {
      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  HeapObjectIterator js_global_property_cell_iterator(
      heap_->property_cell_space());
  for (HeapObject* cell = js_global_property_cell_iterator.Next();
       cell != NULL;
       cell = js_global_property_cell_iterator.Next()) {
    if (cell->IsPropertyCell()) {
      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
    }
  }

  // Update the head of the native contexts list in the heap.
  updating_visitor.VisitPointer(heap_->native_contexts_list_address());

  heap_->string_table()->Iterate(&updating_visitor);
  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
  if (heap_->weak_object_to_code_table()->IsHashTable()) {
    WeakHashTable* table =
        WeakHashTable::cast(heap_->weak_object_to_code_table());
    table->Iterate(&updating_visitor);
    table->Rehash(heap_->undefined_value());
  }

  // Update pointers from the external string table.
  heap_->UpdateReferencesInExternalStringTable(
      &UpdateReferenceInExternalStringTableEntry);

  if (!FLAG_watch_ic_patching) {
    // Update JSFunction pointers from the runtime profiler.
    heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact(
        &updating_visitor);
  }

  EvacuationWeakObjectRetainer evacuation_object_retainer;
  heap()->ProcessWeakReferences(&evacuation_object_retainer);

  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
  // under it.
  ProcessInvalidatedCode(&updating_visitor);

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyEvacuation(heap_);
  }
#endif

  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
  ASSERT(migration_slots_buffer_ == NULL);
}


void MarkCompactCollector::UnlinkEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    p->Unlink();
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();
  }
}


void MarkCompactCollector::ReleaseEvacuationCandidates() {
  int npages = evacuation_candidates_.length();
  for (int i = 0; i < npages; i++) {
    Page* p = evacuation_candidates_[i];
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    space->Free(p->area_start(), p->area_size());
    p->set_scan_on_scavenge(false);
    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
    p->ResetLiveBytes();
    space->ReleasePage(p, false);
  }
  evacuation_candidates_.Rewind(0);
  compacting_ = false;
  heap()->FreeQueuedChunks();
}


static const int kStartTableEntriesPerLine = 5;
static const int kStartTableLines = 171;
static const int kStartTableInvalidLine = 127;
static const int kStartTableUnusedEntry = 126;

#define _ kStartTableUnusedEntry
#define X kStartTableInvalidLine
// Mark-bit to object start offset table.
//
// The line is indexed by the mark bits in a byte.  The first number on the
// line gives the number of live object starts for that byte and the other
// numbers give the offsets (in words) of those object starts.
//
// Since objects are at least two words long we don't have entries for two
// consecutive 1 bits.  All entries after 170 have at least 2 consecutive bits.
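// For example, the byte 0x21 (binary 00100001) has live objects starting at
// bit positions 0 and 5, so its line (index 33 below) reads "2, 0, 5": two
// object starts, at word offsets 0 and 5.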
char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
  0, _, _, _, _,  // 0
  1, 0, _, _, _,  // 1
  1, 1, _, _, _,  // 2
  X, _, _, _, _,  // 3
  1, 2, _, _, _,  // 4
  2, 0, 2, _, _,  // 5
  X, _, _, _, _,  // 6
  X, _, _, _, _,  // 7
  1, 3, _, _, _,  // 8
  2, 0, 3, _, _,  // 9
  2, 1, 3, _, _,  // 10
  X, _, _, _, _,  // 11
  X, _, _, _, _,  // 12
  X, _, _, _, _,  // 13
  X, _, _, _, _,  // 14
  X, _, _, _, _,  // 15
  1, 4, _, _, _,  // 16
  2, 0, 4, _, _,  // 17
  2, 1, 4, _, _,  // 18
  X, _, _, _, _,  // 19
  2, 2, 4, _, _,  // 20
  3, 0, 2, 4, _,  // 21
  X, _, _, _, _,  // 22
  X, _, _, _, _,  // 23
  X, _, _, _, _,  // 24
  X, _, _, _, _,  // 25
  X, _, _, _, _,  // 26
  X, _, _, _, _,  // 27
  X, _, _, _, _,  // 28
  X, _, _, _, _,  // 29
  X, _, _, _, _,  // 30
  X, _, _, _, _,  // 31
  1, 5, _, _, _,  // 32
  2, 0, 5, _, _,  // 33
  2, 1, 5, _, _,  // 34
  X, _, _, _, _,  // 35
  2, 2, 5, _, _,  // 36
  3, 0, 2, 5, _,  // 37
  X, _, _, _, _,  // 38
  X, _, _, _, _,  // 39
  2, 3, 5, _, _,  // 40
  3, 0, 3, 5, _,  // 41
  3, 1, 3, 5, _,  // 42
  X, _, _, _, _,  // 43
  X, _, _, _, _,  // 44
  X, _, _, _, _,  // 45
  X, _, _, _, _,  // 46
  X, _, _, _, _,  // 47
  X, _, _, _, _,  // 48
  X, _, _, _, _,  // 49
  X, _, _, _, _,  // 50
  X, _, _, _, _,  // 51
  X, _, _, _, _,  // 52
  X, _, _, _, _,  // 53
  X, _, _, _, _,  // 54
  X, _, _, _, _,  // 55
  X, _, _, _, _,  // 56
  X, _, _, _, _,  // 57
  X, _, _, _, _,  // 58
  X, _, _, _, _,  // 59
  X, _, _, _, _,  // 60
  X, _, _, _, _,  // 61
  X, _, _, _, _,  // 62
  X, _, _, _, _,  // 63
  1, 6, _, _, _,  // 64
  2, 0, 6, _, _,  // 65
  2, 1, 6, _, _,  // 66
  X, _, _, _, _,  // 67
  2, 2, 6, _, _,  // 68
  3, 0, 2, 6, _,  // 69
  X, _, _, _, _,  // 70
  X, _, _, _, _,  // 71
  2, 3, 6, _, _,  // 72
  3, 0, 3, 6, _,  // 73
  3, 1, 3, 6, _,  // 74
  X, _, _, _, _,  // 75
  X, _, _, _, _,  // 76
  X, _, _, _, _,  // 77
  X, _, _, _, _,  // 78
  X, _, _, _, _,  // 79
  2, 4, 6, _, _,  // 80
  3, 0, 4, 6, _,  // 81
  3, 1, 4, 6, _,  // 82
  X, _, _, _, _,  // 83
  3, 2, 4, 6, _,  // 84
  4, 0, 2, 4, 6,  // 85
  X, _, _, _, _,  // 86
  X, _, _, _, _,  // 87
  X, _, _, _, _,  // 88
  X, _, _, _, _,  // 89
  X, _, _, _, _,  // 90
  X, _, _, _, _,  // 91
  X, _, _, _, _,  // 92
  X, _, _, _, _,  // 93
  X, _, _, _, _,  // 94
  X, _, _, _, _,  // 95
  X, _, _, _, _,  // 96
  X, _, _, _, _,  // 97
  X, _, _, _, _,  // 98
  X, _, _, _, _,  // 99
  X, _, _, _, _,  // 100
  X, _, _, _, _,  // 101
  X, _, _, _, _,  // 102
  X, _, _, _, _,  // 103
  X, _, _, _, _,  // 104
  X, _, _, _, _,  // 105
  X, _, _, _, _,  // 106
  X, _, _, _, _,  // 107
  X, _, _, _, _,  // 108
  X, _, _, _, _,  // 109
  X, _, _, _, _,  // 110
  X, _, _, _, _,  // 111
  X, _, _, _, _,  // 112
  X, _, _, _, _,  // 113
  X, _, _, _, _,  // 114
  X, _, _, _, _,  // 115
  X, _, _, _, _,  // 116
  X, _, _, _, _,  // 117
  X, _, _, _, _,  // 118
  X, _, _, _, _,  // 119
  X, _, _, _, _,  // 120
  X, _, _, _, _,  // 121
  X, _, _, _, _,  // 122
  X, _, _, _, _,  // 123
  X, _, _, _, _,  // 124
  X, _, _, _, _,  // 125
  X, _, _, _, _,  // 126
  X, _, _, _, _,  // 127
  1, 7, _, _, _,  // 128
  2, 0, 7, _, _,  // 129
  2, 1, 7, _, _,  // 130
  X, _, _, _, _,  // 131
  2, 2, 7, _, _,  // 132
  3, 0, 2, 7, _,  // 133
  X, _, _, _, _,  // 134
  X, _, _, _, _,  // 135
  2, 3, 7, _, _,  // 136
  3, 0, 3, 7, _,  // 137
  3, 1, 3, 7, _,  // 138
  X, _, _, _, _,  // 139
  X, _, _, _, _,  // 140
  X, _, _, _, _,  // 141
  X, _, _, _, _,  // 142
  X, _, _, _, _,  // 143
  2, 4, 7, _, _,  // 144
  3, 0, 4, 7, _,  // 145
  3, 1, 4, 7, _,  // 146
  X, _, _, _, _,  // 147
  3, 2, 4, 7, _,  // 148
  4, 0, 2, 4, 7,  // 149
  X, _, _, _, _,  // 150
  X, _, _, _, _,  // 151
  X, _, _, _, _,  // 152
  X, _, _, _, _,  // 153
  X, _, _, _, _,  // 154
  X, _, _, _, _,  // 155
  X, _, _, _, _,  // 156
  X, _, _, _, _,  // 157
  X, _, _, _, _,  // 158
  X, _, _, _, _,  // 159
  2, 5, 7, _, _,  // 160
  3, 0, 5, 7, _,  // 161
  3, 1, 5, 7, _,  // 162
  X, _, _, _, _,  // 163
  3, 2, 5, 7, _,  // 164
  4, 0, 2, 5, 7,  // 165
  X, _, _, _, _,  // 166
  X, _, _, _, _,  // 167
  3, 3, 5, 7, _,  // 168
  4, 0, 3, 5, 7,  // 169
  4, 1, 3, 5, 7   // 170
};
#undef _
#undef X


// Takes a word of mark bits and returns the number of objects that start in
// that range.  Puts the word offsets of the object starts into the supplied
// array.
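// For example, mark_bits 0x21 (object starts at bits 0 and 5) fills in
// starts[0] = 0 and starts[1] = 5 and returns 2.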
static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
  int objects = 0;
  int offset = 0;

  // No consecutive 1 bits.
  ASSERT((mark_bits & 0x180) != 0x180);
  ASSERT((mark_bits & 0x18000) != 0x18000);
  ASSERT((mark_bits & 0x1800000) != 0x1800000);

  while (mark_bits != 0) {
    int byte = (mark_bits & 0xff);
    mark_bits >>= 8;
    if (byte != 0) {
      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
      char* table = kStartTable + byte * kStartTableEntriesPerLine;
      int objects_in_these_8_words = table[0];
      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
      for (int i = 0; i < objects_in_these_8_words; i++) {
        starts[objects++] = offset + table[1 + i];
      }
    }
    offset += 8;
  }
  return objects;
}


static inline Address DigestFreeStart(Address approximate_free_start,
                                      uint32_t free_start_cell) {
  ASSERT(free_start_cell != 0);

  // No consecutive 1 bits.
  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);

  int offsets[16];
  uint32_t cell = free_start_cell;
  int offset_of_last_live;
  if ((cell & 0x80000000u) != 0) {
    // This case would overflow below.
    offset_of_last_live = 31;
  } else {
    // Remove all but one bit, the most significant.  This is an optimization
    // that may or may not be worthwhile.
    cell |= cell >> 16;
    cell |= cell >> 8;
    cell |= cell >> 4;
    cell |= cell >> 2;
    cell |= cell >> 1;
    cell = (cell + 1) >> 1;
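    // Example (illustrative): cell 0x00012000 smears to 0x0001ffff, and
    // (0x0001ffff + 1) >> 1 leaves only the most significant bit, 0x00010000.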
    int live_objects = MarkWordToObjectStarts(cell, offsets);
    ASSERT(live_objects == 1);
    offset_of_last_live = offsets[live_objects - 1];
  }
  Address last_live_start =
      approximate_free_start + offset_of_last_live * kPointerSize;
  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
  Address free_start = last_live_start + last_live->Size();
  return free_start;
}


static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
  ASSERT(cell != 0);

  // No consecutive 1 bits.
  ASSERT((cell & (cell << 1)) == 0);

  int offsets[16];
  if (cell == 0x80000000u) {  // Avoid overflow below.
    return block_address + 31 * kPointerSize;
  }
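  // Isolate the least significant set bit.  Example (illustrative): for cell
  // 0x00012000, cell - 1 = 0x00011fff, the xor gives 0x00003fff, and
  // (0x00003fff + 1) >> 1 = 0x00002000.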
  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
  ASSERT((first_set_bit & cell) == first_set_bit);
  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
  ASSERT(live_objects == 1);
  USE(live_objects);
  return block_address + offsets[0] * kPointerSize;
}

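// In SWEEP_SEQUENTIALLY mode the freed memory goes straight back to the
// space; in SWEEP_IN_PARALLEL mode it goes to a thread-private free list
// that is later concatenated into the shared free list (see SweepInParallel
// below).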
template<MarkCompactCollector::SweepingParallelism mode>
static intptr_t Free(PagedSpace* space,
                     FreeList* free_list,
                     Address start,
                     int size) {
  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
    return space->Free(start, size);
  } else {
    return size - free_list->Free(start, size);
  }
}


// Force instantiation of templatized SweepConservatively method for
// SWEEP_SEQUENTIALLY mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
        PagedSpace*, FreeList*, Page*);


// Force instantiation of templatized SweepConservatively method for
// SWEEP_IN_PARALLEL mode.
template intptr_t MarkCompactCollector::
    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
        PagedSpace*, FreeList*, Page*);

// Sweeps a space conservatively.  After this has been done the larger free
// spaces have been put on the free list and the smaller ones have been
// ignored and left untouched.  A free space is always either ignored or put
// on the free list, never split up into two parts.  This is important
// because it means that any FreeSpace maps left actually describe a region of
// memory that can be ignored when scanning.  Dead objects other than free
// spaces will not contain the free space map.
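// In this code a free space only counts as "larger" when the gap between
// live objects exceeds 32 words (one cell of the mark bitmap); see the
// 32 * kPointerSize checks in the function below.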
template<MarkCompactCollector::SweepingParallelism mode>
intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
                                                   FreeList* free_list,
                                                   Page* p) {
  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
          free_list != NULL) ||
         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
          free_list == NULL));

  p->MarkSweptConservatively();

  intptr_t freed_bytes = 0;
  size_t size = 0;

  // Skip over all the dead objects at the start of the page and mark them
  // free.
  Address cell_base = 0;
  MarkBit::CellType* cell = NULL;
  MarkBitCellIterator it(p);
  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) break;
  }

  if (it.Done()) {
    size = p->area_end() - p->area_start();
    freed_bytes += Free<mode>(space, free_list, p->area_start(),
                              static_cast<int>(size));
    ASSERT_EQ(0, p->LiveBytes());
    return freed_bytes;
  }

  // Grow the size of the start-of-page free space a little to get up to the
  // first live object.
  Address free_end = StartOfLiveObject(cell_base, *cell);
  // Free the first free space.
  size = free_end - p->area_start();
  freed_bytes += Free<mode>(space, free_list, p->area_start(),
                            static_cast<int>(size));

  // The start of the current free area is represented in undigested form by
  // the address of the last 32-word section that contained a live object and
  // the marking bitmap for that cell, which describes where the live object
  // started.  Unless we find a large free space in the bitmap we will not
  // digest this pair into a real address.  We start the iteration here at the
  // first word in the marking bit map that indicates a live object.
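  // Example (illustrative): after a live object is seen, free_start records
  // the base address of its 32-word cell and free_start_cell records that
  // cell's mark bits; DigestFreeStart() later digests the pair into the
  // exact address at which the free area begins.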
  Address free_start = cell_base;
  MarkBit::CellType free_start_cell = *cell;

  for (; !it.Done(); it.Advance()) {
    cell_base = it.CurrentCellBase();
    cell = it.CurrentCell();
    if (*cell != 0) {
      // We have a live object.  Check approximately whether more than 32
      // words have passed since the last live object.
      if (cell_base - free_start > 32 * kPointerSize) {
        free_start = DigestFreeStart(free_start, free_start_cell);
        if (cell_base - free_start > 32 * kPointerSize) {
          // Now that we know the exact start of the free space, it still
          // looks large enough to be worth freeing, so we need to find the
          // start of the first live object at the end of the free space.
          free_end = StartOfLiveObject(cell_base, *cell);
          freed_bytes += Free<mode>(space, free_list, free_start,
                                    static_cast<int>(free_end - free_start));
        }
      }
      // Update our undigested record of where the current free area started.
      free_start = cell_base;
      free_start_cell = *cell;
      // Clear marking bits for current cell.
      *cell = 0;
    }
  }

  // Handle the free space at the end of the page.
  if (cell_base - free_start > 32 * kPointerSize) {
    free_start = DigestFreeStart(free_start, free_start_cell);
    freed_bytes += Free<mode>(space, free_list, free_start,
                              static_cast<int>(p->area_end() - free_start));
  }

  p->ResetLiveBytes();
  return freed_bytes;
}


void MarkCompactCollector::SweepInParallel(PagedSpace* space,
                                           FreeList* private_free_list,
                                           FreeList* free_list) {
  PageIterator it(space);
  while (it.has_next()) {
    Page* p = it.next();

    if (p->TryParallelSweeping()) {
      SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p);
      free_list->Concatenate(private_free_list);
    }
  }
}

void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
                                      sweeper == LAZY_CONSERVATIVE ||
                                      sweeper == PARALLEL_CONSERVATIVE ||
                                      sweeper == CONCURRENT_CONSERVATIVE);
  space->ClearStats();

  PageIterator it(space);

  int pages_swept = 0;
  bool lazy_sweeping_active = false;
  bool unused_page_present = false;
  bool parallel_sweeping_active = false;

  while (it.has_next()) {
    Page* p = it.next();

    ASSERT(p->parallel_sweeping() == 0);
    ASSERT(!p->IsEvacuationCandidate());

    // Clear sweeping flags indicating that marking bits are still intact.
    p->ClearSweptPrecisely();
    p->ClearSweptConservatively();

    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
      // Will be processed in EvacuateNewSpaceAndCandidates.
      ASSERT(evacuation_candidates_.length() > 0);
      continue;
    }

    // One unused page is kept; all further ones are released before sweeping
    // them.
    if (p->LiveBytes() == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        // Adjust unswept free bytes because releasing a page expects said
        // counter to be accurate for unswept pages.
        space->IncreaseUnsweptFreeBytes(p);
        space->ReleasePage(p, true);
        continue;
      }
      unused_page_present = true;
    }

    switch (sweeper) {
      case CONSERVATIVE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
        pages_swept++;
        break;
      }
      case LAZY_CONSERVATIVE: {
        if (lazy_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          space->IncreaseUnsweptFreeBytes(p);
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
        }
        break;
      }
      case CONCURRENT_CONSERVATIVE:
      case PARALLEL_CONSERVATIVE: {
        if (!parallel_sweeping_active) {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
          pages_swept++;
          parallel_sweeping_active = true;
        } else {
          if (FLAG_gc_verbose) {
            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
                   reinterpret_cast<intptr_t>(p));
          }
          p->set_parallel_sweeping(1);
          space->IncreaseUnsweptFreeBytes(p);
        }
        break;
      }
      case PRECISE: {
        if (FLAG_gc_verbose) {
          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
                 reinterpret_cast<intptr_t>(p));
        }
        if (space->identity() == CODE_SPACE) {
          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL);
        } else {
          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL);
        }
        pages_swept++;
        break;
      }
      default: {
        UNREACHABLE();
      }
    }
  }

  if (FLAG_gc_verbose) {
    PrintF("SweepSpace: %s (%d pages swept)\n",
           AllocationSpaceName(space->identity()),
           pages_swept);
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->FreeQueuedChunks();
}


void MarkCompactCollector::SweepSpaces() {
  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif
  SweeperType how_to_sweep =
      FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
  if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
  if (sweep_precisely_) how_to_sweep = PRECISE;

  // Unlink evacuation candidates before sweeper threads access the list of
  // pages to avoid a race condition.
  UnlinkEvacuationCandidates();

  // Noncompacting collections simply sweep the spaces to clear the mark
  // bits and free the nonlive blocks (for old and map spaces).  We sweep
  // the map space last because freeing non-live maps overwrites them and
  // the other spaces rely on possibly non-live maps to get the sizes for
  // non-live objects.
  SequentialSweepingScope scope(this);
  SweepSpace(heap()->old_pointer_space(), how_to_sweep);
  SweepSpace(heap()->old_data_space(), how_to_sweep);

  if (how_to_sweep == PARALLEL_CONSERVATIVE ||
      how_to_sweep == CONCURRENT_CONSERVATIVE) {
    // TODO(hpayer): fix race with concurrent sweeper
    StartSweeperThreads();
  }

  if (how_to_sweep == PARALLEL_CONSERVATIVE) {
    WaitUntilSweepingCompleted();
  }

  RemoveDeadInvalidatedCode();
  SweepSpace(heap()->code_space(), PRECISE);

  SweepSpace(heap()->cell_space(), PRECISE);
  SweepSpace(heap()->property_cell_space(), PRECISE);

  EvacuateNewSpaceAndCandidates();

  // ClearNonLiveTransitions depends on precise sweeping of map space to
  // detect whether unmarked map became dead in this collection or in one
  // of the previous ones.
  SweepSpace(heap()->map_space(), PRECISE);

  // Deallocate unmarked objects and clear marked bits for marked objects.
  heap_->lo_space()->FreeUnmarkedObjects();

  // Deallocate evacuated candidate pages.
  ReleaseEvacuationCandidates();
}


void MarkCompactCollector::EnableCodeFlushing(bool enable) {
#ifdef ENABLE_DEBUGGER_SUPPORT
  if (isolate()->debug()->IsLoaded() ||
      isolate()->debug()->has_break_points()) {
    enable = false;
  }
#endif

  if (enable) {
    if (code_flusher_ != NULL) return;
    code_flusher_ = new CodeFlusher(isolate());
  } else {
    if (code_flusher_ == NULL) return;
    code_flusher_->EvictAllCandidates();
    delete code_flusher_;
    code_flusher_ = NULL;
  }

  if (FLAG_trace_code_flushing) {
    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
  }
}


// TODO(1466) ReportDeleteIfNeeded is not called currently.
// Our profiling tools do not expect intersections between
// code objects.  We should either reenable it or change our tools.
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
                                                Isolate* isolate) {
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (obj->IsCode()) {
    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
  }
#endif
  if (obj->IsCode()) {
    PROFILE(isolate, CodeDeleteEvent(obj->address()));
  }
}


Isolate* MarkCompactCollector::isolate() const {
  return heap_->isolate();
}


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}


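// A typed slot occupies two consecutive buffer entries: the first holds the
// SlotType value cast to a pointer, the second holds the address.  Type
// values are small integers below NUMBER_OF_SLOT_TYPES, so they can never
// collide with a real slot address (see AddTo and UpdateSlots below).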
bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
}


bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
                        SlotsBuffer** buffer_address,
                        SlotType type,
                        Address addr,
                        AdditionMode mode) {
  SlotsBuffer* buffer = *buffer_address;
  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
      allocator->DeallocateChain(buffer_address);
      return false;
    }
    buffer = allocator->AllocateBuffer(buffer);
    *buffer_address = buffer;
  }
  ASSERT(buffer->HasSpaceForTypedSlot());
  buffer->Add(reinterpret_cast<ObjectSlot>(type));
  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
  return true;
}


static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
  if (RelocInfo::IsCodeTarget(rmode)) {
    return SlotsBuffer::CODE_TARGET_SLOT;
  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
    return SlotsBuffer::DEBUG_TARGET_SLOT;
  } else if (RelocInfo::IsJSReturn(rmode)) {
    return SlotsBuffer::JS_RETURN_SLOT;
  }
  UNREACHABLE();
  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
}


void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host() == NULL ||
       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotTypeForRMode(rinfo->rmode()),
                            rinfo->pc(),
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
                            target_page->slots_buffer_address(),
                            SlotsBuffer::CODE_ENTRY_SLOT,
                            slot,
                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
      EvictEvacuationCandidate(target_page);
    }
  }
}


void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host = isolate()->inner_pointer_to_code_cache()->
        GcSafeFindCodeForInnerPointer(pc);
    MarkBit mark_bit = Marking::MarkBitFrom(host);
    if (Marking::IsBlack(mark_bit)) {
      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
      RecordRelocSlot(&rinfo, target);
    }
  }
}


static inline SlotsBuffer::SlotType DecodeSlotType(
    SlotsBuffer::ObjectSlot slot) {
  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
}


void SlotsBuffer::UpdateSlots(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      PointersUpdatingVisitor::UpdateSlot(heap, slot);
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      UpdateSlot(heap->isolate(),
                 &v,
                 DecodeSlotType(slot),
                 reinterpret_cast<Address>(slots_[slot_idx]));
    }
  }
}


void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
  PointersUpdatingVisitor v(heap);

  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
    ObjectSlot slot = slots_[slot_idx];
    if (!IsTypedSlot(slot)) {
      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
        PointersUpdatingVisitor::UpdateSlot(heap, slot);
      }
    } else {
      ++slot_idx;
      ASSERT(slot_idx < idx_);
      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
      if (!IsOnInvalidatedCodeObject(pc)) {
        UpdateSlot(heap->isolate(),
                   &v,
                   DecodeSlotType(slot),
                   reinterpret_cast<Address>(slots_[slot_idx]));
      }
    }
  }
}


SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
  return new SlotsBuffer(next_buffer);
}


void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
  delete buffer;
}


void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
  SlotsBuffer* buffer = *buffer_address;
  while (buffer != NULL) {
    SlotsBuffer* next_buffer = buffer->next();
    DeallocateBuffer(buffer);
    buffer = next_buffer;
  }
  *buffer_address = NULL;
}


} }  // namespace v8::internal