// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "code-stubs.h"
#include "compilation-cache.h"
#include "deoptimizer.h"
#include "execution.h"
#include "gdb-jit.h"
#include "global-handles.h"
#include "heap-profiler.h"
#include "ic-inl.h"
#include "incremental-marking.h"
#include "liveobjectlist-inl.h"
#include "mark-compact.h"
#include "objects-visiting.h"
#include "objects-visiting-inl.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "10";
const char* Marking::kGreyBitPattern = "11";
const char* Marking::kImpossibleBitPattern = "01";
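
// Each pattern above is an object's own mark bit followed by the bit after
// it in the mark bitmap (ClearMarkbits() and Marking::TransferMark() below
// manipulate mark_bit and mark_bit.Next() as a pair): white objects are
// unmarked, grey objects are marked but their children are not yet visited,
// and black objects are marked with all children visited. "01" should never
// occur, hence "impossible".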


// -------------------------------------------------------------------------
// MarkCompactCollector

MarkCompactCollector::MarkCompactCollector() :  // NOLINT
#ifdef DEBUG
      state_(IDLE),
#endif
      sweep_precisely_(false),
      reduce_memory_footprint_(false),
      abort_incremental_marking_(false),
      compacting_(false),
      was_marked_incrementally_(false),
      collect_maps_(FLAG_collect_maps),
      flush_monomorphic_ics_(false),
      tracer_(NULL),
      migration_slots_buffer_(NULL),
      heap_(NULL),
      code_flusher_(NULL),
      encountered_weak_maps_(NULL) { }


#ifdef DEBUG
class VerifyMarkingVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        ASSERT(HEAP->mark_compact_collector()->IsMarked(object));
      }
    }
  }
};


static void VerifyMarking(Address bottom, Address top) {
  VerifyMarkingVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyMarking(NewSpace* space) {
  Address end = space->top();
  NewSpacePageIterator it(space->bottom(), end);
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  ASSERT_EQ(space->bottom(),
            NewSpacePage::FromAddress(space->bottom())->area_start());
  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address limit = it.has_next() ? page->area_end() : end;
    ASSERT(limit == end || !page->Contains(end));
    VerifyMarking(page->area_start(), limit);
  }
}


static void VerifyMarking(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    VerifyMarking(p->area_start(), p->area_end());
  }
}


static void VerifyMarking(Heap* heap) {
  VerifyMarking(heap->old_pointer_space());
  VerifyMarking(heap->old_data_space());
  VerifyMarking(heap->code_space());
  VerifyMarking(heap->cell_space());
  VerifyMarking(heap->map_space());
  VerifyMarking(heap->new_space());

  VerifyMarkingVisitor visitor;

  LargeObjectIterator it(heap->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    if (MarkCompactCollector::IsMarked(obj)) {
      obj->Iterate(&visitor);
    }
  }

  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


class VerifyEvacuationVisitor: public ObjectVisitor {
 public:
  void VisitPointers(Object** start, Object** end) {
    for (Object** current = start; current < end; current++) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};


static void VerifyEvacuation(Address bottom, Address top) {
  VerifyEvacuationVisitor visitor;
  HeapObject* object;
  Address next_object_must_be_here_or_later = bottom;

  for (Address current = bottom;
       current < top;
       current += kPointerSize) {
    object = HeapObject::FromAddress(current);
    if (MarkCompactCollector::IsMarked(object)) {
      ASSERT(current >= next_object_must_be_here_or_later);
      object->Iterate(&visitor);
      next_object_must_be_here_or_later = current + object->Size();
    }
  }
}


static void VerifyEvacuation(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());
  VerifyEvacuationVisitor visitor;

  while (it.has_next()) {
    NewSpacePage* page = it.next();
    Address current = page->area_start();
    Address limit = it.has_next() ? page->area_end() : space->top();
    ASSERT(limit == space->top() || !page->Contains(space->top()));
    while (current < limit) {
      HeapObject* object = HeapObject::FromAddress(current);
      object->Iterate(&visitor);
      current += object->Size();
    }
  }
}


static void VerifyEvacuation(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    if (p->IsEvacuationCandidate()) continue;
    VerifyEvacuation(p->area_start(), p->area_end());
  }
}


static void VerifyEvacuation(Heap* heap) {
  VerifyEvacuation(heap->old_pointer_space());
  VerifyEvacuation(heap->old_data_space());
  VerifyEvacuation(heap->code_space());
  VerifyEvacuation(heap->cell_space());
  VerifyEvacuation(heap->map_space());
  VerifyEvacuation(heap->new_space());

  VerifyEvacuationVisitor visitor;
  heap->IterateStrongRoots(&visitor, VISIT_ALL);
}
#endif


void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  p->MarkEvacuationCandidate();
  evacuation_candidates_.Add(p);
}


static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
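  // E.g. (illustrative numbers): 10 pages of 1000000 usable bytes each with
  // SizeOfObjects() == 9000000 gives free == 1000000 and prints
  // "10 pages, 1000000 (10.0%) free".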
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
         AllocationSpaceName(space->identity()),
         number_of_pages,
         static_cast<int>(free),
         static_cast<double>(free) * 100 / reserved);
}


bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
  if (!compacting_) {
    ASSERT(evacuation_candidates_.length() == 0);

    CollectEvacuationCandidates(heap()->old_pointer_space());
    CollectEvacuationCandidates(heap()->old_data_space());

    if (FLAG_compact_code_space && mode == NON_INCREMENTAL_COMPACTION) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
      TraceFragmentation(heap()->cell_space());
    }

    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();

    compacting_ = evacuation_candidates_.length() > 0;
  }

  return compacting_;
}


void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  ASSERT(state_ == PREPARE_GC);
  ASSERT(encountered_weak_maps_ == Smi::FromInt(0));

  MarkLiveObjects();
  ASSERT(heap_->incremental_marking()->IsStopped());

  if (collect_maps_) ClearNonLiveTransitions();

  ClearWeakMaps();

#ifdef DEBUG
  if (FLAG_verify_heap) {
    VerifyMarking(heap_);
  }
#endif

  SweepSpaces();

  if (!collect_maps_) ReattachInitialMaps();

  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  Finish();

  tracer_ = NULL;
}


#ifdef DEBUG
void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  NewSpacePageIterator it(space->bottom(), space->top());

  while (it.has_next()) {
    NewSpacePage* p = it.next();
    CHECK(p->markbits()->IsClean());
    CHECK_EQ(0, p->LiveBytes());
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_pointer_space());
  VerifyMarkbitsAreClean(heap_->old_data_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->cell_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    ASSERT(Marking::IsWhite(mark_bit));
  }
}
#endif


static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
  PageIterator it(space);

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


static void ClearMarkbitsInNewSpace(NewSpace* space) {
  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());

  while (it.has_next()) {
    Bitmap::Clear(it.next());
  }
}


void MarkCompactCollector::ClearMarkbits() {
  ClearMarkbitsInPagedSpace(heap_->code_space());
  ClearMarkbitsInPagedSpace(heap_->map_space());
  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
  ClearMarkbitsInPagedSpace(heap_->old_data_space());
  ClearMarkbitsInPagedSpace(heap_->cell_space());
  ClearMarkbitsInNewSpace(heap_->new_space());

  LargeObjectIterator it(heap_->lo_space());
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    MarkBit mark_bit = Marking::MarkBitFrom(obj);
    mark_bit.Clear();
    mark_bit.Next().Clear();
  }
}


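// A summary of the cases below, as implemented: a black mark ("10") is
// simply moved to the new location and the function returns true; a grey
// mark ("11") is cleared and the object at new_start is pushed back onto
// the incremental marking deque as grey; a white object needs no work.
// Only the black case returns true.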
bool Marking::TransferMark(Address old_start, Address new_start) {
  // This is only used when resizing an object.
  ASSERT(MemoryChunk::FromAddress(old_start) ==
         MemoryChunk::FromAddress(new_start));

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (old_start == new_start) return false;

  MarkBit new_mark_bit = MarkBitFrom(new_start);
  MarkBit old_mark_bit = MarkBitFrom(old_start);

#ifdef DEBUG
  ObjectColor old_color = Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    old_mark_bit.Clear();
    ASSERT(IsWhite(old_mark_bit));
    Marking::MarkBlack(new_mark_bit);
    return true;
  } else if (Marking::IsGrey(old_mark_bit)) {
    ASSERT(heap_->incremental_marking()->IsMarking());
    old_mark_bit.Clear();
    old_mark_bit.Next().Clear();
    ASSERT(IsWhite(old_mark_bit));
    heap_->incremental_marking()->WhiteToGreyAndPush(
        HeapObject::FromAddress(new_start), new_mark_bit);
    heap_->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  ObjectColor new_color = Color(new_mark_bit);
  ASSERT(new_color == old_color);
#endif

  return false;
}


const char* AllocationSpaceName(AllocationSpace space) {
  switch (space) {
    case NEW_SPACE: return "NEW_SPACE";
    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
    case CODE_SPACE: return "CODE_SPACE";
    case MAP_SPACE: return "MAP_SPACE";
    case CELL_SPACE: return "CELL_SPACE";
    case LO_SPACE: return "LO_SPACE";
    default:
      UNREACHABLE();
  }

  return NULL;
}


// Returns zero for pages that have so little fragmentation that it is not
// worth defragmenting them. Otherwise it returns a positive integer that
// gives an estimate of fragmentation on an arbitrary scale.
static int FreeListFragmentation(PagedSpace* space, Page* p) {
  // If the page was not swept, then there are no free list items on it.
  if (!p->WasSwept()) {
    if (FLAG_trace_fragmentation) {
      PrintF("%p [%s]: %d bytes live (unswept)\n",
             reinterpret_cast<void*>(p),
             AllocationSpaceName(space->identity()),
             p->LiveBytes());
    }
    return 0;
  }

  FreeList::SizeStats sizes;
  space->CountFreeListItems(p, &sizes);

  intptr_t ratio;
  intptr_t ratio_threshold;
  intptr_t area_size = space->AreaSize();
  if (space->identity() == CODE_SPACE) {
    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
        area_size;
    ratio_threshold = 10;
  } else {
    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
        area_size;
    ratio_threshold = 15;
  }
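
  // Worked example (illustrative numbers): in CODE_SPACE with
  // area_size == 1000000, medium_size_ == 40000 and large_size_ == 100000,
  // ratio == (40000 * 10 + 100000 * 2) * 100 / 1000000 == 60; that exceeds
  // ratio_threshold == 10, so the page scores 60 - 10 == 50 below.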

  if (FLAG_trace_fragmentation) {
    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
           reinterpret_cast<void*>(p),
           AllocationSpaceName(space->identity()),
           static_cast<int>(sizes.small_size_),
           static_cast<double>(sizes.small_size_ * 100) /
               area_size,
           static_cast<int>(sizes.medium_size_),
           static_cast<double>(sizes.medium_size_ * 100) /
               area_size,
           static_cast<int>(sizes.large_size_),
           static_cast<double>(sizes.large_size_ * 100) /
               area_size,
           static_cast<int>(sizes.huge_size_),
           static_cast<double>(sizes.huge_size_ * 100) /
               area_size,
           (ratio > ratio_threshold) ? "[fragmented]" : "");
  }

  if (FLAG_always_compact && sizes.Total() != area_size) {
    return 1;
  }

  if (ratio <= ratio_threshold) return 0;  // Not fragmented.

  return static_cast<int>(ratio - ratio_threshold);
}


void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  ASSERT(space->identity() == OLD_POINTER_SPACE ||
         space->identity() == OLD_DATA_SPACE ||
         space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();

  const int kMaxMaxEvacuationCandidates = 1000;
  int max_evacuation_candidates = Min(
      kMaxMaxEvacuationCandidates,
      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
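  // E.g. (illustrative): a space with 200 pages allows
  // sqrt(200 / 2) + 1 == 11 candidates; the limit grows only with the
  // square root of the page count, capped at kMaxMaxEvacuationCandidates.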

  if (FLAG_stress_compaction || FLAG_always_compact) {
    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
  }

  class Candidate {
   public:
    Candidate() : fragmentation_(0), page_(NULL) { }
    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }

    int fragmentation() { return fragmentation_; }
    Page* page() { return page_; }

   private:
    int fragmentation_;
    Page* page_;
  };

  enum CompactionMode {
    COMPACT_FREE_LISTS,
    REDUCE_MEMORY_FOOTPRINT
  };

  CompactionMode mode = COMPACT_FREE_LISTS;

  intptr_t reserved = number_of_pages * space->AreaSize();
  intptr_t over_reserved = reserved - space->SizeOfObjects();
  static const intptr_t kFreenessThreshold = 50;

  if (over_reserved >= 2 * space->AreaSize() &&
      reduce_memory_footprint_) {
    mode = REDUCE_MEMORY_FOOTPRINT;

    // We expect that empty pages are easier to compact so slightly bump the
    // limit.
    max_evacuation_candidates += 2;

    if (FLAG_trace_fragmentation) {
      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
             static_cast<double>(over_reserved) / MB,
             static_cast<int>(kFreenessThreshold));
    }
  }

  intptr_t estimated_release = 0;

  Candidate candidates[kMaxMaxEvacuationCandidates];

  int count = 0;
  int fragmentation = 0;
  Candidate* least = NULL;

  PageIterator it(space);
  if (it.has_next()) it.next();  // Never compact the first page.

  while (it.has_next()) {
    Page* p = it.next();
    p->ClearEvacuationCandidate();

    if (FLAG_stress_compaction) {
      int counter = space->heap()->ms_count();
      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
      // Don't try to release too many pages.
      if (estimated_release >= ((over_reserved * 3) / 4)) {
        continue;
      }

      intptr_t free_bytes = 0;

      if (!p->WasSwept()) {
        free_bytes = (p->area_size() - p->LiveBytes());
      } else {
        FreeList::SizeStats sizes;
        space->CountFreeListItems(p, &sizes);
        free_bytes = sizes.Total();
      }

      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();

      if (free_pct >= kFreenessThreshold) {
        estimated_release += 2 * p->area_size() - free_bytes;
        fragmentation = free_pct;
      } else {
        fragmentation = 0;
      }

      if (FLAG_trace_fragmentation) {
        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
               reinterpret_cast<void*>(p),
               AllocationSpaceName(space->identity()),
               static_cast<int>(free_bytes),
               static_cast<double>(free_bytes * 100) / p->area_size(),
               (fragmentation > 0) ? "[fragmented]" : "");
      }
    } else {
      fragmentation = FreeListFragmentation(space, p);
    }

    if (fragmentation != 0) {
      if (count < max_evacuation_candidates) {
        candidates[count++] = Candidate(fragmentation, p);
      } else {
        if (least == NULL) {
          for (int i = 0; i < max_evacuation_candidates; i++) {
            if (least == NULL ||
                candidates[i].fragmentation() < least->fragmentation()) {
              least = candidates + i;
            }
          }
        }
        if (least->fragmentation() < fragmentation) {
          *least = Candidate(fragmentation, p);
          least = NULL;
        }
      }
    }
  }

  for (int i = 0; i < count; i++) {
    AddEvacuationCandidate(candidates[i].page());
  }

  if (count > 0 && FLAG_trace_fragmentation) {
    PrintF("Collected %d evacuation candidates for space %s\n",
           count,
           AllocationSpaceName(space->identity()));
  }
}


void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    int npages = evacuation_candidates_.length();
    for (int i = 0; i < npages; i++) {
      Page* p = evacuation_candidates_[i];
      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
      p->ClearEvacuationCandidate();
      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
    }
    compacting_ = false;
    evacuation_candidates_.Rewind(0);
    invalidated_code_.Rewind(0);
  }
  ASSERT_EQ(0, evacuation_candidates_.length());
}


void MarkCompactCollector::Prepare(GCTracer* tracer) {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

  // Disable collection of maps if incremental marking is enabled.
  // The map collection algorithm relies on a special map transition tree
  // traversal order which is not implemented for incremental marking.
  collect_maps_ = FLAG_collect_maps && !was_marked_incrementally_;

  // Monomorphic ICs are preserved when possible, but need to be flushed
  // when they might be keeping a Context alive, or when the heap is about
  // to be serialized.
  flush_monomorphic_ics_ =
      heap()->isolate()->context_exit_happened() || Serializer::enabled();

  // Rather than passing the tracer around we stash it in a static member
  // variable.
  tracer_ = tracer;

#ifdef DEBUG
  ASSERT(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  ASSERT(!FLAG_never_compact || !FLAG_always_compact);

  if (collect_maps_) CreateBackPointers();
#ifdef ENABLE_GDB_JIT_INTERFACE
  if (FLAG_gdbjit) {
    // If the GDBJIT interface is active, disable compaction.
    compacting_collection_ = false;
  }
#endif

  // Clear marking bits if incremental marking is aborted.
  if (was_marked_incrementally_ && abort_incremental_marking_) {
    heap()->incremental_marking()->Abort();
    ClearMarkbits();
    AbortCompaction();
    was_marked_incrementally_ = false;
  }

  // Don't start compaction if we are in the middle of an incremental
  // marking cycle: we did not collect any slots.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction(NON_INCREMENTAL_COMPACTION);
  }

  PagedSpaces spaces;
  for (PagedSpace* space = spaces.next();
       space != NULL;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }

#ifdef DEBUG
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}


void MarkCompactCollector::Finish() {
#ifdef DEBUG
  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  // The stub cache is not traversed during GC; clear the cache to
  // force its lazy re-initialization. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  heap()->isolate()->stub_cache()->Clear();

  heap()->external_string_table_.CleanUp();
}


// -------------------------------------------------------------------------
// Phase 1: tracing and marking live objects.
// before: all objects are in normal state.
// after: a live object is marked black (its mark bits are "10").

// Marking all live objects in the heap as part of mark-sweep or mark-compact
// collection. Before marking, all objects are in their normal state. After
// marking, live objects' mark bits are set, indicating that the object
// has been found reachable.
//
// The marking algorithm is a (mostly) depth-first (because of possible stack
// overflow) traversal of the graph of objects reachable from the roots. It
// uses an explicit stack of pointers rather than recursion. The young
// generation's inactive ('from') space is used as a marking stack. The
// objects in the marking stack are the ones that have been reached and marked
// but their children have not yet been visited.
//
// The marking stack can overflow during traversal. In that case, we set an
// overflow flag. When the overflow flag is set, we continue marking objects
// reachable from the objects on the marking stack, but no longer push them on
// the marking stack. Instead, we mark them as both marked and overflowed.
// When the stack is in the overflowed state, objects marked as overflowed
// have been reached and marked but their children have not been visited yet.
// After emptying the marking stack, we clear the overflow flag and traverse
// the heap looking for objects marked as overflowed, push them on the stack,
// and continue with marking. This process repeats until all reachable
// objects have been marked.
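//
// A minimal sketch of that drain/refill loop (hypothetical helper names,
// not the actual V8 functions):
//
//   MarkRoots();                        // mark roots, push them on the stack
//   EmptyMarkingStack();                // drain; may set the overflow flag
//   while (overflow_flag) {
//     overflow_flag = false;
//     ScanHeapForOverflowedObjects();   // re-push objects marked overflowed
//     EmptyMarkingStack();              // drain again
//   }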

class CodeFlusher {
 public:
  explicit CodeFlusher(Isolate* isolate)
      : isolate_(isolate),
        jsfunction_candidates_head_(NULL),
        shared_function_info_candidates_head_(NULL) {}

  void AddCandidate(SharedFunctionInfo* shared_info) {
    SetNextCandidate(shared_info, shared_function_info_candidates_head_);
    shared_function_info_candidates_head_ = shared_info;
  }

  void AddCandidate(JSFunction* function) {
    ASSERT(function->code() == function->shared()->code());

    SetNextCandidate(function, jsfunction_candidates_head_);
    jsfunction_candidates_head_ = function;
  }

  void ProcessCandidates() {
    ProcessSharedFunctionInfoCandidates();
    ProcessJSFunctionCandidates();
  }

 private:
  void ProcessJSFunctionCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    JSFunction* candidate = jsfunction_candidates_head_;
    JSFunction* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);

      SharedFunctionInfo* shared = candidate->shared();

      Code* code = shared->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        shared->set_code(lazy_compile);
        candidate->set_code(lazy_compile);
      } else {
        candidate->set_code(shared->code());
      }

      // We are in the middle of a GC cycle so the write barrier in the code
      // setter did not record the slot update and we have to do that manually.
      Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
      Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
      isolate_->heap()->mark_compact_collector()->
          RecordCodeEntrySlot(slot, target);

      RecordSharedFunctionInfoCodeSlot(shared);

      candidate = next_candidate;
    }

    jsfunction_candidates_head_ = NULL;
  }


  void ProcessSharedFunctionInfoCandidates() {
    Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kLazyCompile);

    SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
    SharedFunctionInfo* next_candidate;
    while (candidate != NULL) {
      next_candidate = GetNextCandidate(candidate);
      SetNextCandidate(candidate, NULL);

      Code* code = candidate->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      if (!code_mark.Get()) {
        candidate->set_code(lazy_compile);
      }

      RecordSharedFunctionInfoCodeSlot(candidate);

      candidate = next_candidate;
    }

    shared_function_info_candidates_head_ = NULL;
  }

  void RecordSharedFunctionInfoCodeSlot(SharedFunctionInfo* shared) {
    Object** slot = HeapObject::RawField(shared,
                                         SharedFunctionInfo::kCodeOffset);
    isolate_->heap()->mark_compact_collector()->
        RecordSlot(slot, slot, HeapObject::cast(*slot));
  }

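  // Note how the candidate lists are threaded through the candidates
  // themselves rather than through side allocations: the helpers below reuse
  // a JSFunction's code-entry field, and a SharedFunctionInfo's
  // code()->gc_metadata() slot, as "next" pointers. This works because
  // ProcessCandidates() rewrites those fields before they are used again.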
  static JSFunction** GetNextCandidateField(JSFunction* candidate) {
    return reinterpret_cast<JSFunction**>(
        candidate->address() + JSFunction::kCodeEntryOffset);
  }

  static JSFunction* GetNextCandidate(JSFunction* candidate) {
    return *GetNextCandidateField(candidate);
  }

  static void SetNextCandidate(JSFunction* candidate,
                               JSFunction* next_candidate) {
    *GetNextCandidateField(candidate) = next_candidate;
  }

  static SharedFunctionInfo** GetNextCandidateField(
      SharedFunctionInfo* candidate) {
    Code* code = candidate->code();
    return reinterpret_cast<SharedFunctionInfo**>(
        code->address() + Code::kGCMetadataOffset);
  }

  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
    return reinterpret_cast<SharedFunctionInfo*>(
        candidate->code()->gc_metadata());
  }

  static void SetNextCandidate(SharedFunctionInfo* candidate,
                               SharedFunctionInfo* next_candidate) {
    candidate->code()->set_gc_metadata(next_candidate);
  }

  Isolate* isolate_;
  JSFunction* jsfunction_candidates_head_;
  SharedFunctionInfo* shared_function_info_candidates_head_;

  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
};


MarkCompactCollector::~MarkCompactCollector() {
  if (code_flusher_ != NULL) {
    delete code_flusher_;
    code_flusher_ = NULL;
  }
}


static inline HeapObject* ShortCircuitConsString(Object** p) {
  // Optimization: If the heap object pointed to by p is a non-symbol
  // cons string whose right substring is HEAP->empty_string, update
  // it in place to its left substring. Return the updated value.
  //
  // Here we assume that if we change *p, we replace it with a heap object
  // (i.e., the left substring of a cons string is always a heap object).
  //
  // The check performed is:
  //   object->IsConsString() && !object->IsSymbol() &&
  //   (ConsString::cast(object)->second() == HEAP->empty_string())
  // except the maps for the object and its possible substrings might be
  // marked.
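  //
  // For example (illustrative): a cons string of the form cons("foo", "")
  // is short-circuited so that *p points directly at "foo", which lets the
  // otherwise-dead cons cell be collected.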
  HeapObject* object = HeapObject::cast(*p);
  if (!FLAG_clever_optimizations) return object;
  Map* map = object->map();
  InstanceType type = map->instance_type();
  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;

  Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
  Heap* heap = map->GetHeap();
  if (second != heap->empty_string()) {
    return object;
  }

  // Since we don't have the object's start, it is impossible to update the
  // page dirty marks. Therefore, we only replace the string with its left
  // substring when page dirty marks do not change.
  Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;

  *p = first;
  return HeapObject::cast(first);
}


class StaticMarkingVisitor : public StaticVisitorBase {
 public:
  static inline void IterateBody(Map* map, HeapObject* obj) {
    table_.GetVisitor(map)(map, obj);
  }

  static void Initialize() {
    table_.Register(kVisitShortcutCandidate,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitConsString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      ConsString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitSlicedString,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      SlicedString::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitFixedArray,
                    &FlexibleBodyVisitor<StaticMarkingVisitor,
                                         FixedArray::BodyDescriptor,
                                         void>::Visit);

    table_.Register(kVisitGlobalContext, &VisitGlobalContext);

    table_.Register(kVisitFixedDoubleArray, DataObjectVisitor::Visit);

    table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
    table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
    table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);

    table_.Register(kVisitJSWeakMap, &VisitJSWeakMap);

    table_.Register(kVisitOddball,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Oddball::BodyDescriptor,
                                      void>::Visit);
    table_.Register(kVisitMap,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      Map::BodyDescriptor,
                                      void>::Visit);

    table_.Register(kVisitCode, &VisitCode);

    table_.Register(kVisitSharedFunctionInfo,
                    &VisitSharedFunctionInfoAndFlushCode);

    table_.Register(kVisitJSFunction,
                    &VisitJSFunctionAndFlushCode);

    table_.Register(kVisitJSRegExp,
                    &VisitRegExpAndFlushCode);

    table_.Register(kVisitPropertyCell,
                    &FixedBodyVisitor<StaticMarkingVisitor,
                                      JSGlobalPropertyCell::BodyDescriptor,
                                      void>::Visit);

    table_.RegisterSpecializations<DataObjectVisitor,
                                   kVisitDataObject,
                                   kVisitDataObjectGeneric>();

    table_.RegisterSpecializations<JSObjectVisitor,
                                   kVisitJSObject,
                                   kVisitJSObjectGeneric>();

    table_.RegisterSpecializations<StructObjectVisitor,
                                   kVisitStruct,
                                   kVisitStructGeneric>();
  }
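
  // A minimal sketch of the dispatch pattern set up above (illustrative;
  // not the actual VisitorDispatchTable code): each map is classified by a
  // small visitor id, so marking an object's body is one indexed call.
  //
  //   typedef void (*VisitFn)(Map* map, HeapObject* obj);
  //   VisitFn table[kVisitorIdCount];
  //   ...
  //   table[visitor_id_of(map)](map, obj);  // what IterateBody amounts to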
| 1011 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1012 | INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1013 | MarkObjectByPointer(heap->mark_compact_collector(), p, p); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1014 | } |
| 1015 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1016 | INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1017 | // Mark all objects pointed to in [start, end). |
| 1018 | const int kMinRangeForMarkingRecursion = 64; |
| 1019 | if (end - start >= kMinRangeForMarkingRecursion) { |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 1020 | if (VisitUnmarkedObjects(heap, start, end)) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1021 | // We are close to a stack overflow, so just mark the objects. |
| 1022 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1023 | MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 1024 | for (Object** p = start; p < end; p++) { |
| 1025 | MarkObjectByPointer(collector, start, p); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1026 | } |
| 1027 | } |
| 1028 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 1029 | static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo) { |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1030 | ASSERT(rinfo->rmode() == RelocInfo::GLOBAL_PROPERTY_CELL); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1031 | JSGlobalPropertyCell* cell = |
| 1032 | JSGlobalPropertyCell::cast(rinfo->target_cell()); |
| 1033 | MarkBit mark = Marking::MarkBitFrom(cell); |
| 1034 | heap->mark_compact_collector()->MarkObject(cell, mark); |
| 1035 | } |
| 1036 | |
| 1037 | static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo) { |
| 1038 | ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); |
| 1039 | // TODO(mstarzinger): We do not short-circuit cons strings here, verify |
| 1040 | // that there can be no such embedded pointers and add assertion here. |
| 1041 | HeapObject* object = HeapObject::cast(rinfo->target_object()); |
| 1042 | heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); |
| 1043 | MarkBit mark = Marking::MarkBitFrom(object); |
| 1044 | heap->mark_compact_collector()->MarkObject(object, mark); |
| 1045 | } |
| 1046 | |
| 1047 | static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo) { |
| 1048 | ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 1049 | Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 1050 | if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() |
| 1051 | && (target->ic_state() == MEGAMORPHIC || |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 1052 | heap->mark_compact_collector()->flush_monomorphic_ics_ || |
| 1053 | target->ic_age() != heap->global_ic_age())) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1054 | IC::Clear(rinfo->pc()); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1055 | target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1056 | } |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 1057 | MarkBit code_mark = Marking::MarkBitFrom(target); |
| 1058 | heap->mark_compact_collector()->MarkObject(target, code_mark); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1059 | heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 1060 | } |

  static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo) {
    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
            rinfo->IsPatchedReturnSequence()) ||
           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
            rinfo->IsPatchedDebugBreakSlotSequence()));
    Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
    MarkBit code_mark = Marking::MarkBitFrom(target);
    heap->mark_compact_collector()->MarkObject(target, code_mark);
    heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
  }

  // Mark object pointed to by p.
  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
                                         Object** anchor_slot,
                                         Object** p)) {
    if (!(*p)->IsHeapObject()) return;
    HeapObject* object = ShortCircuitConsString(p);
    collector->RecordSlot(anchor_slot, p, object);
    MarkBit mark = Marking::MarkBitFrom(object);
    collector->MarkObject(object, mark);
  }


  // Visit an unmarked object.
  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
                                         HeapObject* obj)) {
#ifdef DEBUG
    ASSERT(Isolate::Current()->heap()->Contains(obj));
    ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj));
#endif
    Map* map = obj->map();
    Heap* heap = obj->GetHeap();
    MarkBit mark = Marking::MarkBitFrom(obj);
    heap->mark_compact_collector()->SetMark(obj, mark);
    // Mark the map pointer and the body.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    heap->mark_compact_collector()->MarkObject(map, map_mark);
    IterateBody(map, obj);
  }

  // Visit all unmarked objects pointed to by [start, end).
  // Returns false if the operation fails (lack of stack space).
  static inline bool VisitUnmarkedObjects(Heap* heap,
                                          Object** start,
                                          Object** end) {
    // Return false if we are close to the stack limit.
    StackLimitCheck check(heap->isolate());
    if (check.HasOverflowed()) return false;

    MarkCompactCollector* collector = heap->mark_compact_collector();
    // Visit the unmarked objects.
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (!o->IsHeapObject()) continue;
      collector->RecordSlot(start, p, o);
      HeapObject* obj = HeapObject::cast(o);
      MarkBit mark = Marking::MarkBitFrom(obj);
      if (mark.Get()) continue;
      VisitUnmarkedObject(collector, obj);
    }
    return true;
  }
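
  // VisitUnmarkedObject recurses via IterateBody, so the stack limit check
  // above is what bounds the C++ stack depth. When it trips, callers are
  // expected to fall back to the slower, marking-deque based path rather
  // than lose objects.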

  static inline void VisitExternalReference(Address* p) { }
  static inline void VisitExternalReference(RelocInfo* rinfo) { }
  static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }

 private:
  class DataObjectVisitor {
   public:
    template<int size>
    static void VisitSpecialized(Map* map, HeapObject* object) {
    }

    static void Visit(Map* map, HeapObject* object) {
    }
  };

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              JSObject::BodyDescriptor,
                              void> JSObjectVisitor;

  typedef FlexibleBodyVisitor<StaticMarkingVisitor,
                              StructBodyDescriptor,
                              void> StructObjectVisitor;

  static void VisitJSWeakMap(Map* map, HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(object);

    // Enqueue weak map in linked list of encountered weak maps.
    ASSERT(weak_map->next() == Smi::FromInt(0));
    weak_map->set_next(collector->encountered_weak_maps());
    collector->set_encountered_weak_maps(weak_map);

    // Skip visiting the backing hash table containing the mappings.
    int object_size = JSWeakMap::BodyDescriptor::SizeOf(map, object);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::BodyDescriptor::kStartOffset,
        JSWeakMap::kTableOffset);
    BodyVisitorBase<StaticMarkingVisitor>::IteratePointers(
        map->GetHeap(),
        object,
        JSWeakMap::kTableOffset + kPointerSize,
        object_size);

    // Mark the backing hash table without pushing it on the marking stack.
    ObjectHashTable* table = ObjectHashTable::cast(weak_map->table());
    ASSERT(!MarkCompactCollector::IsMarked(table));
    collector->SetMark(table, Marking::MarkBitFrom(table));
    collector->MarkObject(table->map(), Marking::MarkBitFrom(table->map()));
    ASSERT(MarkCompactCollector::IsMarked(table->map()));
  }
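
  // The effect of the above: the backing table survives this collection,
  // but because it is never pushed on the marking deque its contents are
  // not traced. Keys and values stay unmarked unless reached through some
  // other path, which is what allows dead weak map entries to be cleared
  // after marking completes.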

  static void VisitCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    Code* code = reinterpret_cast<Code*>(object);
    if (FLAG_cleanup_code_caches_at_gc) {
      Object* raw_info = code->type_feedback_info();
      if (raw_info->IsTypeFeedbackInfo()) {
        TypeFeedbackCells* type_feedback_cells =
            TypeFeedbackInfo::cast(raw_info)->type_feedback_cells();
        for (int i = 0; i < type_feedback_cells->CellCount(); i++) {
          ASSERT(type_feedback_cells->AstId(i)->IsSmi());
          JSGlobalPropertyCell* cell = type_feedback_cells->Cell(i);
          cell->set_value(TypeFeedbackCells::RawUninitializedSentinel(heap));
        }
      }
    }
    code->CodeIterateBody<StaticMarkingVisitor>(heap);
  }
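
  // Resetting each type feedback cell to the uninitialized sentinel above
  // drops the cell's reference to the maps and functions it had recorded,
  // so stale feedback does not keep otherwise dead objects alive across
  // this collection. The code body itself is still traced normally through
  // CodeIterateBody.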

  // Code flushing support.

  // How many collections a newly compiled code object will survive before
  // being flushed.
  static const int kCodeAgeThreshold = 5;

  static const int kRegExpCodeThreshold = 5;

  inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
    Object* undefined = heap->undefined_value();
    return (info->script() != undefined) &&
        (reinterpret_cast<Script*>(info->script())->source() != undefined);
  }


  inline static bool IsCompiled(JSFunction* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsCompiled(SharedFunctionInfo* function) {
    return function->code() !=
        function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
  }

  inline static bool IsFlushable(Heap* heap, JSFunction* function) {
    SharedFunctionInfo* shared_info = function->unchecked_shared();

    // Code is either on the stack, in the compilation cache or referenced
    // by an optimized version of the function.
    MarkBit code_mark = Marking::MarkBitFrom(function->code());
    if (code_mark.Get()) {
      if (!Marking::MarkBitFrom(shared_info).Get()) {
        shared_info->set_code_age(0);
      }
      return false;
    }

    // We do not flush code for optimized functions.
    if (function->code() != shared_info->code()) {
      return false;
    }

    return IsFlushable(heap, shared_info);
  }

  inline static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info) {
    // Code is either on the stack, in the compilation cache or referenced
    // by an optimized version of the function.
    MarkBit code_mark =
        Marking::MarkBitFrom(shared_info->code());
    if (code_mark.Get()) {
      return false;
    }

    // The function must be compiled and have the source code available,
    // to be able to recompile it in case we need the function again.
    if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
      return false;
    }

    // We never flush code for API functions.
    Object* function_data = shared_info->function_data();
    if (function_data->IsFunctionTemplateInfo()) {
      return false;
    }

    // Only flush code for functions.
    if (shared_info->code()->kind() != Code::FUNCTION) {
      return false;
    }

    // The function must be lazy compilable.
    if (!shared_info->allows_lazy_compilation()) {
      return false;
    }

    // If this is a full script wrapped in a function we do not flush the
    // code.
    if (shared_info->is_toplevel()) {
      return false;
    }

    // Age this shared function info.
    if (shared_info->code_age() < kCodeAgeThreshold) {
      shared_info->set_code_age(shared_info->code_age() + 1);
      return false;
    }

    return true;
  }
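
  // Summarizing the checks above, a SharedFunctionInfo is flushable only
  // if: its code is unmarked (not on the stack, not in the compilation
  // cache), it is compiled with source available for recompilation, it is
  // not an API function, its code is unoptimized Code::FUNCTION, it allows
  // lazy compilation, it is not a toplevel script, and its code_age
  // counter, incremented here each collection, has already reached
  // kCodeAgeThreshold.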


  static bool FlushCodeForFunction(Heap* heap, JSFunction* function) {
    if (!IsFlushable(heap, function)) return false;

    // This function's code looks flushable. But we have to postpone the
    // decision until we see all functions that point to the same
    // SharedFunctionInfo because some of them might be optimized.
    // That would make the nonoptimized version of the code nonflushable,
    // because it is required for bailing out from optimized code.
    heap->mark_compact_collector()->code_flusher()->AddCandidate(function);
    return true;
  }

  static inline bool IsValidNotBuiltinContext(Object* ctx) {
    return ctx->IsContext() &&
        !Context::cast(ctx)->global()->IsJSBuiltinsObject();
  }


  static void VisitSharedFunctionInfoGeneric(Map* map, HeapObject* object) {
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    FixedBodyVisitor<StaticMarkingVisitor,
                     SharedFunctionInfo::BodyDescriptor,
                     void>::Visit(map, object);
  }


  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
                                          JSRegExp* re,
                                          bool is_ascii) {
    // Make sure that the fixed array is in fact initialized on the RegExp.
    // We could potentially trigger a GC when initializing the RegExp.
    if (HeapObject::cast(re->data())->map()->instance_type() !=
        FIXED_ARRAY_TYPE) return;

    // Make sure this is a RegExp that actually contains code.
    if (re->TypeTagUnchecked() != JSRegExp::IRREGEXP) return;

    Object* code = re->DataAtUnchecked(JSRegExp::code_index(is_ascii));
    if (!code->IsSmi() &&
        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
      // Save a copy that can be reinstated if we need the code again.
      re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                             code,
                             heap);

      // Saving a copy might create a pointer into a compaction candidate
      // that was not observed by the marker. This might happen if the
      // JSRegExp data was marked through the compilation cache before the
      // marker reached the JSRegExp object.
      FixedArray* data = FixedArray::cast(re->data());
      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
      heap->mark_compact_collector()->RecordSlot(slot, slot, code);

      // Set a number in the 0-255 range to guarantee no smi overflow.
      re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                             Smi::FromInt(heap->sweep_generation() & 0xff),
                             heap);
    } else if (code->IsSmi()) {
      int value = Smi::cast(code)->value();
      // The regexp has not been compiled yet or there was a compilation
      // error.
      if (value == JSRegExp::kUninitializedValue ||
          value == JSRegExp::kCompilationErrorValue) {
        return;
      }

      // Check if we should flush now.
      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
        re->SetDataAtUnchecked(JSRegExp::code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
        re->SetDataAtUnchecked(JSRegExp::saved_code_index(is_ascii),
                               Smi::FromInt(JSRegExp::kUninitializedValue),
                               heap);
      }
    }
  }
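
  // A worked example of the aging scheme above: suppose sweep_generation
  // is 42 when a compiled regexp is first visited here. The code slot is
  // overwritten with Smi 42 and the real Code object is stashed in the
  // saved-code slot. If the regexp stays unused, the flush check fires when
  // sweep_generation reaches 47, i.e. 42 + kRegExpCodeThreshold modulo 256,
  // and both slots are reset to kUninitializedValue. Using the regexp in
  // the meantime reinstates the saved code and the aging starts over.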


  // Works by setting the current sweep_generation (as a smi) in the
  // code object place in the data array of the RegExp, keeping a copy
  // around that can be reinstated if we reuse the RegExp before flushing.
  // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
  // we flush the code.
  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSRegExpFields(map, object);
      return;
    }
    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
    // Flush code or set age on both ASCII and two-byte code.
    UpdateRegExpCodeAgeAndFlush(heap, re, true);
    UpdateRegExpCodeAgeAndFlush(heap, re, false);
    // Visit the fields of the RegExp, including the updated FixedArray.
    VisitJSRegExpFields(map, object);
  }


  static void VisitSharedFunctionInfoAndFlushCode(Map* map,
                                                  HeapObject* object) {
    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitSharedFunctionInfoGeneric(map, object);
      return;
    }
    VisitSharedFunctionInfoAndFlushCodeGeneric(map, object, false);
  }


  static void VisitSharedFunctionInfoAndFlushCodeGeneric(
      Map* map, HeapObject* object, bool known_flush_code_candidate) {
    Heap* heap = map->GetHeap();
    SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(object);

    if (shared->IsInobjectSlackTrackingInProgress()) shared->DetachInitialMap();

    if (!known_flush_code_candidate) {
      known_flush_code_candidate = IsFlushable(heap, shared);
      if (known_flush_code_candidate) {
        heap->mark_compact_collector()->code_flusher()->AddCandidate(shared);
      }
    }

    VisitSharedFunctionInfoFields(heap, object, known_flush_code_candidate);
  }


  static void VisitCodeEntry(Heap* heap, Address entry_address) {
    Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
    MarkBit mark = Marking::MarkBitFrom(code);
    heap->mark_compact_collector()->MarkObject(code, mark);
    heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
  }

  static void VisitGlobalContext(Map* map, HeapObject* object) {
    FixedBodyVisitor<StaticMarkingVisitor,
                     Context::MarkCompactBodyDescriptor,
                     void>::Visit(map, object);

    MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
    for (int idx = Context::FIRST_WEAK_SLOT;
         idx < Context::GLOBAL_CONTEXT_SLOTS;
         ++idx) {
      Object** slot =
          HeapObject::RawField(object, FixedArray::OffsetOfElementAt(idx));
      collector->RecordSlot(slot, slot, *slot);
    }
  }

  static void VisitJSFunctionAndFlushCode(Map* map, HeapObject* object) {
    Heap* heap = map->GetHeap();
    MarkCompactCollector* collector = heap->mark_compact_collector();
    if (!collector->is_code_flushing_enabled()) {
      VisitJSFunction(map, object);
      return;
    }

    JSFunction* jsfunction = reinterpret_cast<JSFunction*>(object);
    // The function must have a valid context and not be a builtin.
    bool flush_code_candidate = false;
    if (IsValidNotBuiltinContext(jsfunction->unchecked_context())) {
      flush_code_candidate = FlushCodeForFunction(heap, jsfunction);
    }

    if (!flush_code_candidate) {
      Code* code = jsfunction->shared()->code();
      MarkBit code_mark = Marking::MarkBitFrom(code);
      collector->MarkObject(code, code_mark);

      if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
        collector->MarkInlinedFunctionsCode(jsfunction->code());
      }
    }

    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          flush_code_candidate);
  }


  static void VisitJSFunction(Map* map, HeapObject* object) {
    VisitJSFunctionFields(map,
                          reinterpret_cast<JSFunction*>(object),
                          false);
  }


#define SLOT_ADDR(obj, offset) \
  reinterpret_cast<Object**>((obj)->address() + offset)


  static inline void VisitJSFunctionFields(Map* map,
                                           JSFunction* object,
                                           bool flush_code_candidate) {
    Heap* heap = map->GetHeap();

    VisitPointers(heap,
                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));

    if (!flush_code_candidate) {
      VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
    } else {
      // Don't visit the code object.

      // Visit the shared function info to avoid double checking of its
      // flushability.
      SharedFunctionInfo* shared_info = object->unchecked_shared();
      MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info);
      if (!shared_info_mark.Get()) {
        Map* shared_info_map = shared_info->map();
        MarkBit shared_info_map_mark =
            Marking::MarkBitFrom(shared_info_map);
        heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark);
        heap->mark_compact_collector()->MarkObject(shared_info_map,
                                                   shared_info_map_mark);
        VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map,
                                                   shared_info,
                                                   true);
      }
    }

    VisitPointers(
        heap,
        HeapObject::RawField(object,
                             JSFunction::kCodeEntryOffset + kPointerSize),
        HeapObject::RawField(object,
                             JSFunction::kNonWeakFieldsEndOffset));

    // Don't visit the next function list field as it is a weak reference.
    Object** next_function =
        HeapObject::RawField(object, JSFunction::kNextFunctionLinkOffset);
    heap->mark_compact_collector()->RecordSlot(
        next_function, next_function, *next_function);
  }
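
  // The next-function-link field above is skipped by the marker because it
  // is a weak reference, but its slot is still recorded: if the target
  // function survives through some other path and is later moved by the
  // compactor, the recorded slot lets the pointer be updated instead of
  // left dangling.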

  static inline void VisitJSRegExpFields(Map* map,
                                         HeapObject* object) {
    int last_property_offset =
        JSRegExp::kSize + kPointerSize * map->inobject_properties();
    VisitPointers(map->GetHeap(),
                  SLOT_ADDR(object, JSRegExp::kPropertiesOffset),
                  SLOT_ADDR(object, last_property_offset));
  }


  static void VisitSharedFunctionInfoFields(Heap* heap,
                                            HeapObject* object,
                                            bool flush_code_candidate) {
    VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kNameOffset));

    if (!flush_code_candidate) {
      VisitPointer(heap, SLOT_ADDR(object, SharedFunctionInfo::kCodeOffset));
    }

    VisitPointers(heap,
                  SLOT_ADDR(object, SharedFunctionInfo::kScopeInfoOffset),
                  SLOT_ADDR(object, SharedFunctionInfo::kSize));
  }

#undef SLOT_ADDR

  typedef void (*Callback)(Map* map, HeapObject* object);

  static VisitorDispatchTable<Callback> table_;
};


VisitorDispatchTable<StaticMarkingVisitor::Callback>
    StaticMarkingVisitor::table_;


class MarkingVisitor : public ObjectVisitor {
 public:
  explicit MarkingVisitor(Heap* heap) : heap_(heap) { }

  void VisitPointer(Object** p) {
    StaticMarkingVisitor::VisitPointer(heap_, p);
  }

  void VisitPointers(Object** start, Object** end) {
    StaticMarkingVisitor::VisitPointers(heap_, start, end);
  }

 private:
  Heap* heap_;
};


class CodeMarkingVisitor : public ThreadVisitor {
 public:
  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
    collector_->PrepareThreadForCodeFlushing(isolate, top);
  }

 private:
  MarkCompactCollector* collector_;
};


class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
 public:
  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) VisitPointer(p);
  }

  void VisitPointer(Object** slot) {
    Object* obj = *slot;
    if (obj->IsSharedFunctionInfo()) {
      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
      MarkBit shared_mark = Marking::MarkBitFrom(shared);
      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
      collector_->MarkObject(shared->code(), code_mark);
      collector_->MarkObject(shared, shared_mark);
    }
  }

 private:
  MarkCompactCollector* collector_;
};


void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
  // For optimized functions we should retain both the non-optimized version
  // of the code and the non-optimized versions of all inlined functions.
  // This is required to support bailing out from inlined code.
  DeoptimizationInputData* data =
      DeoptimizationInputData::cast(code->deoptimization_data());

  FixedArray* literals = data->LiteralArray();

  for (int i = 0, count = data->InlinedFunctionCount()->value();
       i < count;
       i++) {
    JSFunction* inlined = JSFunction::cast(literals->get(i));
    Code* inlined_code = inlined->shared()->code();
    MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
    MarkObject(inlined_code, inlined_code_mark);
  }
}
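
// The loop above relies on the layout convention that the first
// InlinedFunctionCount() entries of the deoptimization literal array are
// the inlined JSFunctions. Marking each one's shared (non-optimized) code
// keeps those versions alive so that bailing out from the optimized code
// remains possible.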


void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
                                                        ThreadLocalTop* top) {
  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
    // Note: for a frame that has a pending lazy deoptimization,
    // StackFrame::unchecked_code will return a non-optimized code object for
    // the outermost function and StackFrame::LookupCode will return the
    // actual optimized code object.
    StackFrame* frame = it.frame();
    Code* code = frame->unchecked_code();
    MarkBit code_mark = Marking::MarkBitFrom(code);
    MarkObject(code, code_mark);
    if (frame->is_optimized()) {
      MarkInlinedFunctionsCode(frame->LookupCode());
    }
  }
}


void MarkCompactCollector::PrepareForCodeFlushing() {
  ASSERT(heap() == Isolate::Current()->heap());

  // TODO(1609): Currently the incremental marker does not support code
  // flushing.
  if (!FLAG_flush_code || was_marked_incrementally_) {
    EnableCodeFlushing(false);
    return;
  }

#ifdef ENABLE_DEBUGGER_SUPPORT
  if (heap()->isolate()->debug()->IsLoaded() ||
      heap()->isolate()->debug()->has_break_points()) {
    EnableCodeFlushing(false);
    return;
  }
#endif

  EnableCodeFlushing(true);

  // Ensure that the empty descriptor array is marked. MarkDescriptorArray
  // relies on it being marked before any other descriptor array.
  HeapObject* descriptor_array = heap()->empty_descriptor_array();
  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
  MarkObject(descriptor_array, descriptor_array_mark);

  // Make sure we are not referencing the code from the stack.
  ASSERT(this == heap()->mark_compact_collector());
  PrepareThreadForCodeFlushing(heap()->isolate(),
                               heap()->isolate()->thread_local_top());

  // Iterate the archived stacks in all threads to check if
  // the code is referenced.
  CodeMarkingVisitor code_marking_visitor(this);
  heap()->isolate()->thread_manager()->IterateArchivedThreads(
      &code_marking_visitor);

  SharedFunctionInfoMarkingVisitor visitor(this);
  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);

  ProcessMarkingDeque();
}
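
// In effect, code flushing is only attempted when FLAG_flush_code is on,
// marking was not incremental, and no debugger is attached. Everything
// reachable from the current stacks, archived threads, the compilation
// cache and the handle scopes has been marked above, so code objects that
// are still unmarked when the main marking phase encounters them are
// genuine flush candidates.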


// Visitor class for marking heap roots.
class RootMarkingVisitor : public ObjectVisitor {
 public:
  explicit RootMarkingVisitor(Heap* heap)
      : collector_(heap->mark_compact_collector()) { }

  void VisitPointer(Object** p) {
    MarkObjectByPointer(p);
  }

  void VisitPointers(Object** start, Object** end) {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    if (!(*p)->IsHeapObject()) return;

    // Replace flat cons strings in place.
    HeapObject* object = ShortCircuitConsString(p);
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    if (mark_bit.Get()) return;

    Map* map = object->map();
    // Mark the object.
    collector_->SetMark(object, mark_bit);

    // Mark the map pointer and body, and push them on the marking stack.
    MarkBit map_mark = Marking::MarkBitFrom(map);
    collector_->MarkObject(map, map_mark);
    StaticMarkingVisitor::IterateBody(map, object);

    // Mark all the objects reachable from the map and body. May leave
    // overflowed objects in the heap.
    collector_->EmptyMarkingDeque();
  }

  MarkCompactCollector* collector_;
};


// Helper class for pruning the symbol table.
class SymbolTableCleaner : public ObjectVisitor {
 public:
  explicit SymbolTableCleaner(Heap* heap)
      : heap_(heap), pointers_removed_(0) { }

  virtual void VisitPointers(Object** start, Object** end) {
    // Visit all HeapObject pointers in [start, end).
    for (Object** p = start; p < end; p++) {
      Object* o = *p;
      if (o->IsHeapObject() &&
          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
        // Check if the symbol being pruned is an external symbol. We need to
        // delete the associated external data as this symbol is going away.

        // Since no objects have yet been moved we can safely access the map
        // of the object.
        if (o->IsExternalString()) {
          heap_->FinalizeExternalString(String::cast(*p));
        }
        // Set the entry to the_hole_value (as deleted).
        *p = heap_->the_hole_value();
        pointers_removed_++;
      }
    }
  }

  int PointersRemoved() {
    return pointers_removed_;
  }

 private:
  Heap* heap_;
  int pointers_removed_;
};


// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  virtual Object* RetainAs(Object* object) {
    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
      return object;
    } else {
      return NULL;
    }
  }
};


void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) {
  ASSERT(IsMarked(object));
  ASSERT(HEAP->Contains(object));
  if (object->IsMap()) {
    Map* map = Map::cast(object);
    heap_->ClearCacheOnMap(map);

    // When map collection is enabled we have to mark through the map's
    // transitions in a special way to make transition links weak. Only maps
    // for subclasses of JSReceiver can have transitions.
    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
    if (collect_maps_ && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) {
      MarkMapContents(map);
    } else {
      marking_deque_.PushBlack(map);
    }
  } else {
    marking_deque_.PushBlack(object);
  }
}
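
// Example of the special map treatment above: with collect_maps_ enabled,
// a newly marked Map is not simply pushed black onto the deque. Instead
// MarkMapContents visits its fields selectively, so transition and
// prototype-transition links behave as weak references and dead transition
// targets can be removed later by ClearNonLiveTransitions.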


void MarkCompactCollector::MarkMapContents(Map* map) {
  // Mark the prototype transitions array but don't push it onto the marking
  // stack. This will make references from it weak. We will clean dead
  // prototype transitions in ClearNonLiveTransitions.
  FixedArray* prototype_transitions = map->prototype_transitions();
  MarkBit mark = Marking::MarkBitFrom(prototype_transitions);
  if (!mark.Get()) {
    mark.Set();
    MemoryChunk::IncrementLiveBytesFromGC(prototype_transitions->address(),
                                          prototype_transitions->Size());
  }

  Object** raw_descriptor_array_slot =
      HeapObject::RawField(map, Map::kInstanceDescriptorsOrBitField3Offset);
  Object* raw_descriptor_array = *raw_descriptor_array_slot;
  if (!raw_descriptor_array->IsSmi()) {
    MarkDescriptorArray(
        reinterpret_cast<DescriptorArray*>(raw_descriptor_array));
  }

  // Mark the Object* fields of the Map.
  // Since the descriptor array has been marked already, it is fine
  // that one of these fields contains a pointer to it.
  Object** start_slot = HeapObject::RawField(map,
                                             Map::kPointerFieldsBeginOffset);

  Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset);

  StaticMarkingVisitor::VisitPointers(map->GetHeap(), start_slot, end_slot);
}
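
// Note the trick used for the prototype transitions array above: setting
// its mark bit (and live byte count) without pushing it on the deque keeps
// the array itself alive while leaving the maps it points to unmarked
// unless they are reached some other way. The same pattern is used for the
// weak map backing table in VisitJSWeakMap.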


void MarkCompactCollector::MarkAccessorPairSlot(HeapObject* accessors,
                                                int offset) {
  Object** slot = HeapObject::RawField(accessors, offset);
  HeapObject* accessor = HeapObject::cast(*slot);
  if (accessor->IsMap()) return;
  RecordSlot(slot, slot, accessor);
  MarkObjectAndPush(accessor);
}


void MarkCompactCollector::MarkDescriptorArray(
    DescriptorArray* descriptors) {
  MarkBit descriptors_mark = Marking::MarkBitFrom(descriptors);
  if (descriptors_mark.Get()) return;
  // The empty descriptor array is marked as a root before any maps are
  // marked.
  ASSERT(descriptors != heap()->empty_descriptor_array());
  SetMark(descriptors, descriptors_mark);

  FixedArray* contents = reinterpret_cast<FixedArray*>(
      descriptors->get(DescriptorArray::kContentArrayIndex));
  ASSERT(contents->IsHeapObject());
  ASSERT(!IsMarked(contents));
  ASSERT(contents->IsFixedArray());
  ASSERT(contents->length() >= 2);
  MarkBit contents_mark = Marking::MarkBitFrom(contents);
  SetMark(contents, contents_mark);
  // Contents contains (value, details) pairs. If the details say that the
  // type of descriptor is MAP_TRANSITION, CONSTANT_TRANSITION,
  // EXTERNAL_ARRAY_TRANSITION or NULL_DESCRIPTOR, we don't mark the value
  // as live. Only for MAP_TRANSITION, EXTERNAL_ARRAY_TRANSITION and
  // CONSTANT_TRANSITION is the value an Object* (a Map*).
  for (int i = 0; i < contents->length(); i += 2) {
    // If the pair (value, details) at index i, i+1 is not
    // a transition or null descriptor, mark the value.
    PropertyDetails details(Smi::cast(contents->get(i + 1)));

    Object** slot = contents->data_start() + i;
    if (!(*slot)->IsHeapObject()) continue;
    HeapObject* value = HeapObject::cast(*slot);

    RecordSlot(slot, slot, *slot);

    switch (details.type()) {
      case NORMAL:
      case FIELD:
      case CONSTANT_FUNCTION:
      case HANDLER:
      case INTERCEPTOR:
        MarkObjectAndPush(value);
        break;
      case CALLBACKS:
        if (!value->IsAccessorPair()) {
          MarkObjectAndPush(value);
        } else if (!MarkObjectWithoutPush(value)) {
          MarkAccessorPairSlot(value, AccessorPair::kGetterOffset);
          MarkAccessorPairSlot(value, AccessorPair::kSetterOffset);
        }
        break;
      case ELEMENTS_TRANSITION:
        // For maps with multiple elements transitions, the transition maps
        // are stored in a FixedArray. Keep the fixed array alive but not
        // the maps that it refers to.
        if (value->IsFixedArray()) MarkObjectWithoutPush(value);
        break;
      case MAP_TRANSITION:
      case CONSTANT_TRANSITION:
      case NULL_DESCRIPTOR:
        break;
    }
  }
  // The DescriptorArray descriptors contains a pointer to its contents
  // array, but the contents array is already marked.
  marking_deque_.PushBlack(descriptors);
}
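
// The switch above encodes which descriptor values are strong: ordinary
// property descriptors keep their values alive; a CALLBACKS descriptor
// holding an AccessorPair marks the pair and its non-map getter/setter
// fields; an ELEMENTS_TRANSITION keeps only the FixedArray of transition
// maps alive, not the maps inside it; and MAP_TRANSITION,
// CONSTANT_TRANSITION and NULL_DESCRIPTOR values are skipped entirely,
// which is what makes map transitions weak.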
| 1924 | |
| 1925 | |
| 1926 | void MarkCompactCollector::CreateBackPointers() { |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 1927 | HeapObjectIterator iterator(heap()->map_space()); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1928 | for (HeapObject* next_object = iterator.Next(); |
| 1929 | next_object != NULL; next_object = iterator.Next()) { |
| 1930 | if (next_object->IsMap()) { // Could also be FreeSpace object on free list. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1931 | Map* map = Map::cast(next_object); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1932 | STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 1933 | if (map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1934 | map->CreateBackPointers(); |
| 1935 | } else { |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 1936 | ASSERT(map->instance_descriptors() == heap()->empty_descriptor_array()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1937 | } |
| 1938 | } |
| 1939 | } |
| 1940 | } |
| 1941 | |
| 1942 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1943 | // Fill the marking stack with overflowed objects returned by the given |
| 1944 | // iterator. Stop when the marking stack is filled or the end of the space |
| 1945 | // is reached, whichever comes first. |
| 1946 | template<class T> |
| 1947 | static void DiscoverGreyObjectsWithIterator(Heap* heap, |
| 1948 | MarkingDeque* marking_deque, |
| 1949 | T* it) { |
| 1950 | // The caller should ensure that the marking stack is initially not full, |
| 1951 | // so that we don't waste effort pointlessly scanning for objects. |
| 1952 | ASSERT(!marking_deque->IsFull()); |
| 1953 | |
| 1954 | Map* filler_map = heap->one_pointer_filler_map(); |
| 1955 | for (HeapObject* object = it->Next(); |
| 1956 | object != NULL; |
| 1957 | object = it->Next()) { |
| 1958 | MarkBit markbit = Marking::MarkBitFrom(object); |
| 1959 | if ((object->map() != filler_map) && Marking::IsGrey(markbit)) { |
| 1960 | Marking::GreyToBlack(markbit); |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 1961 | MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1962 | marking_deque->PushBlack(object); |
| 1963 | if (marking_deque->IsFull()) return; |
| 1964 | } |
| 1965 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1966 | } |
| 1967 | |
| 1968 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1969 | static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts); |
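|      | // Forward declaration; the definition appears further down in this file. |
|      | // Given one 32-bit word of the mark bitmap, it writes the word offsets of |
|      | // the object starts encoded there into 'starts' and returns how many it |
|      | // found (see its use in EvacuateLiveObjectsFromPage below). |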
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 1970 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 1971 | |
| 1972 | static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) { |
| 1973 | ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 1974 | ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 1975 | ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 1976 | ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 1977 | |
| 1978 | MarkBit::CellType* cells = p->markbits()->cells(); |
| 1979 | |
| 1980 | int last_cell_index = |
| 1981 | Bitmap::IndexToCell( |
| 1982 | Bitmap::CellAlignIndex( |
| 1983 | p->AddressToMarkbitIndex(p->area_end()))); |
| 1984 | |
| 1985 | Address cell_base = p->area_start(); |
| 1986 | int cell_index = Bitmap::IndexToCell( |
| 1987 | Bitmap::CellAlignIndex( |
| 1988 | p->AddressToMarkbitIndex(cell_base))); |
| 1989 | |
| 1990 | |
| 1991 | for (; |
| 1992 | cell_index < last_cell_index; |
| 1993 | cell_index++, cell_base += 32 * kPointerSize) { |
| 1994 | ASSERT((unsigned)cell_index == |
| 1995 | Bitmap::IndexToCell( |
| 1996 | Bitmap::CellAlignIndex( |
| 1997 | p->AddressToMarkbitIndex(cell_base)))); |
| 1998 | |
| 1999 | const MarkBit::CellType current_cell = cells[cell_index]; |
| 2000 | if (current_cell == 0) continue; |
| 2001 | |
| 2002 | const MarkBit::CellType next_cell = cells[cell_index + 1]; |
| 2003 | MarkBit::CellType grey_objects = current_cell & |
| 2004 | ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1))); |
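|      | // Grey objects carry the bit pattern "11" (see the ASSERTs above): both |
|      | // bit i and bit i+1 are set for an object whose first mark bit is bit i. |
|      | // ANDing the cell with itself shifted right by one therefore leaves a 1 |
|      | // exactly at the first mark bit of every grey object, and the lowest bit |
|      | // of the next cell is shifted in so that a pair straddling the cell |
|      | // boundary is still seen. Worked example with 8-bit cells for brevity: |
|      | // current_cell = 0b00011010 (black object at bit 1, grey object at bits |
|      | // 3 and 4) gives grey_objects = 0b00011010 & 0b00001101 = 0b00001000, |
|      | // i.e. only the grey object's start bit survives. The loop below then |
|      | // steps past each handled pair with 'offset += 2'. |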
| 2005 | |
| 2006 | int offset = 0; |
| 2007 | while (grey_objects != 0) { |
| 2008 | int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects); |
| 2009 | grey_objects >>= trailing_zeros; |
| 2010 | offset += trailing_zeros; |
| 2011 | MarkBit markbit(&cells[cell_index], 1 << offset, false); |
| 2012 | ASSERT(Marking::IsGrey(markbit)); |
| 2013 | Marking::GreyToBlack(markbit); |
| 2014 | Address addr = cell_base + offset * kPointerSize; |
| 2015 | HeapObject* object = HeapObject::FromAddress(addr); |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2016 | MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size()); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2017 | marking_deque->PushBlack(object); |
| 2018 | if (marking_deque->IsFull()) return; |
| 2019 | offset += 2; |
| 2020 | grey_objects >>= 2; |
| 2021 | } |
| 2022 | |
| 2023 | grey_objects >>= (Bitmap::kBitsPerCell - 1); |
| 2024 | } |
| 2025 | } |
| 2026 | |
| 2027 | |
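|      | // A precisely swept space can be walked object by object with a |
|      | // HeapObjectIterator, but after a conservative sweep the free memory on |
|      | // a page is not guaranteed to be iterable, so we fall back to scanning |
|      | // the mark bitmap of each page directly. |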
| 2028 | static void DiscoverGreyObjectsInSpace(Heap* heap, |
| 2029 | MarkingDeque* marking_deque, |
| 2030 | PagedSpace* space) { |
| 2031 | if (!space->was_swept_conservatively()) { |
| 2032 | HeapObjectIterator it(space); |
| 2033 | DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); |
| 2034 | } else { |
| 2035 | PageIterator it(space); |
| 2036 | while (it.has_next()) { |
| 2037 | Page* p = it.next(); |
| 2038 | DiscoverGreyObjectsOnPage(marking_deque, p); |
| 2039 | if (marking_deque->IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2040 | } |
| 2041 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2042 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2043 | |
| 2044 | |
| 2045 | bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2046 | Object* o = *p; |
| 2047 | if (!o->IsHeapObject()) return false; |
| 2048 | HeapObject* heap_object = HeapObject::cast(o); |
| 2049 | MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 2050 | return !mark.Get(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2051 | } |
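|      | // This predicate is handed to GlobalHandles::IdentifyWeakHandles (see |
|      | // MarkLiveObjects below): a weak handle is considered dead when its |
|      | // target is still unmarked after the transitive marking pass. |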
| 2052 | |
| 2053 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2054 | void MarkCompactCollector::MarkSymbolTable() { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2055 | SymbolTable* symbol_table = heap()->symbol_table(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2056 | // Mark the symbol table itself. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2057 | MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); |
| 2058 | SetMark(symbol_table, symbol_table_mark); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2059 | // Explicitly mark the prefix. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2060 | MarkingVisitor marker(heap()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2061 | symbol_table->IteratePrefix(&marker); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2062 | ProcessMarkingDeque(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2063 | } |
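|      | // Note that only the table object and its prefix were marked above; the |
|      | // symbol elements themselves stay white unless they are reachable from |
|      | // somewhere else, which is what lets AfterMarking prune dead symbols. |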
| 2064 | |
| 2065 | |
| 2066 | void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { |
| 2067 | // Mark the heap roots including global variables, stack variables, |
| 2068 | // etc., and all objects reachable from them. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2069 | heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2070 | |
| 2071 | // Handle the symbol table specially. |
| 2072 | MarkSymbolTable(); |
| 2073 | |
| 2074 | // There may be overflowed objects in the heap. Visit them now. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2075 | while (marking_deque_.overflowed()) { |
| 2076 | RefillMarkingDeque(); |
| 2077 | EmptyMarkingDeque(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2078 | } |
| 2079 | } |
| 2080 | |
| 2081 | |
| 2082 | void MarkCompactCollector::MarkObjectGroups() { |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2083 | List<ObjectGroup*>* object_groups = |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2084 | heap()->isolate()->global_handles()->object_groups(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2085 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2086 | int last = 0; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2087 | for (int i = 0; i < object_groups->length(); i++) { |
| 2088 | ObjectGroup* entry = object_groups->at(i); |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2089 | ASSERT(entry != NULL); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2090 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2091 | Object*** objects = entry->objects_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2092 | bool group_marked = false; |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2093 | for (size_t j = 0; j < entry->length_; j++) { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2094 | Object* object = *objects[j]; |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2095 | if (object->IsHeapObject()) { |
| 2096 | HeapObject* heap_object = HeapObject::cast(object); |
| 2097 | MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 2098 | if (mark.Get()) { |
| 2099 | group_marked = true; |
| 2100 | break; |
| 2101 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2102 | } |
| 2103 | } |
| 2104 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2105 | if (!group_marked) { |
| 2106 | (*object_groups)[last++] = entry; |
| 2107 | continue; |
| 2108 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2109 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2110 | // An object in the group is marked, so mark as grey all white heap |
| 2111 | // objects in the group. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2112 | for (size_t j = 0; j < entry->length_; ++j) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2113 | Object* object = *objects[j]; |
| 2114 | if (object->IsHeapObject()) { |
| 2115 | HeapObject* heap_object = HeapObject::cast(object); |
| 2116 | MarkBit mark = Marking::MarkBitFrom(heap_object); |
| 2117 | MarkObject(heap_object, mark); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2118 | } |
| 2119 | } |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2120 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2121 | // Once the entire group has been colored grey, set the object group |
| 2122 | // to NULL so it won't be processed again. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2123 | entry->Dispose(); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2124 | object_groups->at(i) = NULL; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2125 | } |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2126 | object_groups->Rewind(last); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2127 | } |
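|      | // Groups whose objects are all still white are compacted towards the |
|      | // front of the list (via 'last' and Rewind above) so that a later |
|      | // ProcessExternalMarking iteration can retry them; groups that contained |
|      | // a marked object have been disposed and NULLed out. |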
| 2128 | |
| 2129 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2130 | void MarkCompactCollector::MarkImplicitRefGroups() { |
| 2131 | List<ImplicitRefGroup*>* ref_groups = |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2132 | heap()->isolate()->global_handles()->implicit_ref_groups(); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2133 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2134 | int last = 0; |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2135 | for (int i = 0; i < ref_groups->length(); i++) { |
| 2136 | ImplicitRefGroup* entry = ref_groups->at(i); |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2137 | ASSERT(entry != NULL); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2138 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2139 | if (!IsMarked(*entry->parent_)) { |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2140 | (*ref_groups)[last++] = entry; |
| 2141 | continue; |
| 2142 | } |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2143 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2144 | Object*** children = entry->children_; |
| 2145 | // A parent object is marked, so mark all child heap objects. |
| 2146 | for (size_t j = 0; j < entry->length_; ++j) { |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2147 | if ((*children[j])->IsHeapObject()) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2148 | HeapObject* child = HeapObject::cast(*children[j]); |
| 2149 | MarkBit mark = Marking::MarkBitFrom(child); |
| 2150 | MarkObject(child, mark); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2151 | } |
| 2152 | } |
| 2153 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2154 | // Once the entire group has been marked, dispose it because it's |
| 2155 | // not needed anymore. |
| 2156 | entry->Dispose(); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2157 | } |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2158 | ref_groups->Rewind(last); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2159 | } |
| 2160 | |
| 2161 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2162 | // Mark all objects reachable from the objects on the marking stack. |
| 2163 | // Before: the marking stack contains zero or more heap object pointers. |
| 2164 | // After: the marking stack is empty, and all objects reachable from the |
| 2165 | // marking stack have been marked, or are overflowed in the heap. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2166 | void MarkCompactCollector::EmptyMarkingDeque() { |
| 2167 | while (!marking_deque_.IsEmpty()) { |
| 2168 | while (!marking_deque_.IsEmpty()) { |
| 2169 | HeapObject* object = marking_deque_.Pop(); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2170 | ASSERT(object->IsHeapObject()); |
| 2171 | ASSERT(heap()->Contains(object)); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2172 | ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2173 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2174 | Map* map = object->map(); |
| 2175 | MarkBit map_mark = Marking::MarkBitFrom(map); |
| 2176 | MarkObject(map, map_mark); |
Iain Merrick | 7568138 | 2010-08-19 15:07:18 +0100 | [diff] [blame] | 2177 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2178 | StaticMarkingVisitor::IterateBody(map, object); |
| 2179 | } |
| 2180 | |
| 2181 | // Process encountered weak maps, mark objects only reachable by those |
| 2182 | // weak maps and repeat until fix-point is reached. |
| 2183 | ProcessWeakMaps(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2184 | } |
| 2185 | } |
| 2186 | |
| 2187 | |
| 2188 | // Sweep the heap for overflowed objects, clear their overflow bits, and |
| 2189 | // push them on the marking stack. Stop early if the marking stack fills |
| 2190 | // before sweeping completes. If sweeping completes, there are no remaining |
| 2191 | // overflowed objects in the heap, so the overflow flag on the marking stack |
| 2192 | // is cleared. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2193 | void MarkCompactCollector::RefillMarkingDeque() { |
| 2194 | ASSERT(marking_deque_.overflowed()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2195 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2196 | SemiSpaceIterator new_it(heap()->new_space()); |
| 2197 | DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
| 2198 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2199 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2200 | DiscoverGreyObjectsInSpace(heap(), |
| 2201 | &marking_deque_, |
| 2202 | heap()->old_pointer_space()); |
| 2203 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2204 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2205 | DiscoverGreyObjectsInSpace(heap(), |
| 2206 | &marking_deque_, |
| 2207 | heap()->old_data_space()); |
| 2208 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2209 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2210 | DiscoverGreyObjectsInSpace(heap(), |
| 2211 | &marking_deque_, |
| 2212 | heap()->code_space()); |
| 2213 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2214 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2215 | DiscoverGreyObjectsInSpace(heap(), |
| 2216 | &marking_deque_, |
| 2217 | heap()->map_space()); |
| 2218 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2219 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2220 | DiscoverGreyObjectsInSpace(heap(), |
| 2221 | &marking_deque_, |
| 2222 | heap()->cell_space()); |
| 2223 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2224 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2225 | LargeObjectIterator lo_it(heap()->lo_space()); |
| 2226 | DiscoverGreyObjectsWithIterator(heap(), |
| 2227 | &marking_deque_, |
| 2228 | &lo_it); |
| 2229 | if (marking_deque_.IsFull()) return; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2230 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2231 | marking_deque_.ClearOverflowed(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2232 | } |
| 2233 | |
| 2234 | |
| 2235 | // Mark all objects reachable (transitively) from objects on the marking |
| 2236 | // stack. Before: the marking stack contains zero or more heap object |
| 2237 | // pointers. After: the marking stack is empty and there are no overflowed |
| 2238 | // objects in the heap. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2239 | void MarkCompactCollector::ProcessMarkingDeque() { |
| 2240 | EmptyMarkingDeque(); |
| 2241 | while (marking_deque_.overflowed()) { |
| 2242 | RefillMarkingDeque(); |
| 2243 | EmptyMarkingDeque(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2244 | } |
| 2245 | } |
| 2246 | |
| 2247 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2248 | void MarkCompactCollector::ProcessExternalMarking() { |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2249 | bool work_to_do = true; |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2250 | ASSERT(marking_deque_.IsEmpty()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2251 | while (work_to_do) { |
| 2252 | MarkObjectGroups(); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2253 | MarkImplicitRefGroups(); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2254 | work_to_do = !marking_deque_.IsEmpty(); |
| 2255 | ProcessMarkingDeque(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2256 | } |
| 2257 | } |
| 2258 | |
| 2259 | |
| 2260 | void MarkCompactCollector::MarkLiveObjects() { |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 2261 | GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 2262 | // The recursive GC marker detects when it is nearing stack overflow, |
| 2263 | // and switches to a different marking system. JS interrupts interfere |
| 2264 | // with the C stack limit check. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2265 | PostponeInterruptsScope postpone(heap()->isolate()); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 2266 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2267 | bool incremental_marking_overflowed = false; |
| 2268 | IncrementalMarking* incremental_marking = heap_->incremental_marking(); |
| 2269 | if (was_marked_incrementally_) { |
| 2270 | // Finalize the incremental marking and check whether we had an overflow. |
| 2271 | // Both markers use the grey color to mark overflowed objects, so the |
| 2272 | // non-incremental marker can deal with them as if the overflow had |
| 2273 | // occurred during normal marking. |
| 2274 | // But the incremental marker uses a separate marking deque, so we have |
| 2275 | // to explicitly copy its overflow state. |
| 2276 | incremental_marking->Finalize(); |
| 2277 | incremental_marking_overflowed = |
| 2278 | incremental_marking->marking_deque()->overflowed(); |
| 2279 | incremental_marking->marking_deque()->ClearOverflowed(); |
| 2280 | } else { |
| 2281 | // Abort any pending incremental activities e.g. incremental sweeping. |
| 2282 | incremental_marking->Abort(); |
| 2283 | } |
| 2284 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2285 | #ifdef DEBUG |
| 2286 | ASSERT(state_ == PREPARE_GC); |
| 2287 | state_ = MARK_LIVE_OBJECTS; |
| 2288 | #endif |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2289 | // The to space contains live objects, and a page in from space is used |
| 2290 | // as the marking stack. |
| 2291 | Address marking_deque_start = heap()->new_space()->FromSpacePageLow(); |
| 2292 | Address marking_deque_end = heap()->new_space()->FromSpacePageHigh(); |
| 2293 | if (FLAG_force_marking_deque_overflows) { |
| 2294 | marking_deque_end = marking_deque_start + 64 * kPointerSize; |
| 2295 | } |
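|      | // With FLAG_force_marking_deque_overflows the deque is clamped to 64 |
|      | // slots, forcing frequent overflows so that the RefillMarkingDeque / |
|      | // EmptyMarkingDeque recovery path gets exercised (a testing aid). |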
| 2296 | marking_deque_.Initialize(marking_deque_start, |
| 2297 | marking_deque_end); |
| 2298 | ASSERT(!marking_deque_.overflowed()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2299 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2300 | if (incremental_marking_overflowed) { |
| 2301 | // There are overflowed objects left in the heap after incremental marking. |
| 2302 | marking_deque_.SetOverflowed(); |
| 2303 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2304 | |
Iain Merrick | 7568138 | 2010-08-19 15:07:18 +0100 | [diff] [blame] | 2305 | PrepareForCodeFlushing(); |
| 2306 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2307 | if (was_marked_incrementally_) { |
| 2308 | // There is no write barrier on cells, so we have to scan them now, at |
| 2309 | // the end of incremental marking. |
| 2310 | { |
| 2311 | HeapObjectIterator cell_iterator(heap()->cell_space()); |
| 2312 | HeapObject* cell; |
| 2313 | while ((cell = cell_iterator.Next()) != NULL) { |
| 2314 | ASSERT(cell->IsJSGlobalPropertyCell()); |
| 2315 | if (IsMarked(cell)) { |
| 2316 | int offset = JSGlobalPropertyCell::kValueOffset; |
| 2317 | StaticMarkingVisitor::VisitPointer( |
| 2318 | heap(), |
| 2319 | reinterpret_cast<Object**>(cell->address() + offset)); |
| 2320 | } |
| 2321 | } |
| 2322 | } |
| 2323 | } |
| 2324 | |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2325 | RootMarkingVisitor root_visitor(heap()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2326 | MarkRoots(&root_visitor); |
| 2327 | |
| 2328 | // The objects reachable from the roots are marked, yet unreachable |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2329 | // objects are unmarked. Mark objects reachable due to host |
| 2330 | // application specific logic. |
| 2331 | ProcessExternalMarking(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2332 | |
| 2333 | // The objects reachable from the roots or object groups are marked, |
| 2334 | // yet unreachable objects are unmarked. Mark objects reachable |
| 2335 | // only from weak global handles. |
| 2336 | // |
| 2337 | // First we identify nonlive weak handles and mark them as pending |
| 2338 | // destruction. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2339 | heap()->isolate()->global_handles()->IdentifyWeakHandles( |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2340 | &IsUnmarkedHeapObject); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2341 | // Then we mark the objects and process the transitive closure. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2342 | heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2343 | while (marking_deque_.overflowed()) { |
| 2344 | RefillMarkingDeque(); |
| 2345 | EmptyMarkingDeque(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2346 | } |
| 2347 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2348 | // Repeat host application specific marking to mark unmarked objects |
| 2349 | // reachable from the weak roots. |
| 2350 | ProcessExternalMarking(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2351 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2352 | AfterMarking(); |
| 2353 | } |
| 2354 | |
| 2355 | |
| 2356 | void MarkCompactCollector::AfterMarking() { |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2357 | // Object literal map caches reference symbols (cache keys) and maps |
| 2358 | // (cache values). At this point, still-useful maps have already been |
| 2359 | // marked. Mark the keys for the live values before we process the |
| 2360 | // symbol table. |
| 2361 | ProcessMapCaches(); |
| 2362 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2363 | // Prune the symbol table, removing all symbols only pointed to by the |
| 2364 | // symbol table itself. Its elements were deliberately left unmarked by |
| 2365 | // MarkSymbolTable, so symbols not reached from elsewhere are still white. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2366 | SymbolTable* symbol_table = heap()->symbol_table(); |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2367 | SymbolTableCleaner v(heap()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2368 | symbol_table->IterateElements(&v); |
| 2369 | symbol_table->ElementsRemoved(v.PointersRemoved()); |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2370 | heap()->external_string_table_.Iterate(&v); |
| 2371 | heap()->external_string_table_.CleanUp(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2372 | |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 2373 | // Process the weak references. |
| 2374 | MarkCompactWeakObjectRetainer mark_compact_object_retainer; |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2375 | heap()->ProcessWeakReferences(&mark_compact_object_retainer); |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 2376 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2377 | // Remove object groups after marking phase. |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 2378 | heap()->isolate()->global_handles()->RemoveObjectGroups(); |
| 2379 | heap()->isolate()->global_handles()->RemoveImplicitRefGroups(); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 2380 | |
| 2381 | // Flush code from collected candidates. |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2382 | if (is_code_flushing_enabled()) { |
| 2383 | code_flusher_->ProcessCandidates(); |
| 2384 | } |
Ben Murdoch | e0cee9b | 2011-05-25 10:26:03 +0100 | [diff] [blame] | 2385 | |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 2386 | if (!FLAG_watch_ic_patching) { |
| 2387 | // Clean up dead objects from the runtime profiler. |
| 2388 | heap()->isolate()->runtime_profiler()->RemoveDeadSamples(); |
| 2389 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2390 | } |
| 2391 | |
| 2392 | |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2393 | void MarkCompactCollector::ProcessMapCaches() { |
| 2394 | Object* raw_context = heap()->global_contexts_list_; |
| 2395 | while (raw_context != heap()->undefined_value()) { |
| 2396 | Context* context = reinterpret_cast<Context*>(raw_context); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2397 | if (IsMarked(context)) { |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2398 | HeapObject* raw_map_cache = |
| 2399 | HeapObject::cast(context->get(Context::MAP_CACHE_INDEX)); |
| 2400 | // A map cache may be reachable from the stack. In this case |
| 2401 | // it's already transitively marked and it's too late to clean |
| 2402 | // up its parts. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2403 | if (!IsMarked(raw_map_cache) && |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2404 | raw_map_cache != heap()->undefined_value()) { |
| 2405 | MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache); |
| 2406 | int existing_elements = map_cache->NumberOfElements(); |
| 2407 | int used_elements = 0; |
| 2408 | for (int i = MapCache::kElementsStartIndex; |
| 2409 | i < map_cache->length(); |
| 2410 | i += MapCache::kEntrySize) { |
| 2411 | Object* raw_key = map_cache->get(i); |
| 2412 | if (raw_key == heap()->undefined_value() || |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2413 | raw_key == heap()->the_hole_value()) continue; |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2414 | STATIC_ASSERT(MapCache::kEntrySize == 2); |
| 2415 | Object* raw_map = map_cache->get(i + 1); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2416 | if (raw_map->IsHeapObject() && IsMarked(raw_map)) { |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2417 | ++used_elements; |
| 2418 | } else { |
| 2419 | // Delete useless entries with unmarked maps. |
| 2420 | ASSERT(raw_map->IsMap()); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2421 | map_cache->set_the_hole(i); |
| 2422 | map_cache->set_the_hole(i + 1); |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2423 | } |
| 2424 | } |
| 2425 | if (used_elements == 0) { |
| 2426 | context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value()); |
| 2427 | } else { |
| 2428 | // Note: we don't actually shrink the cache here to avoid |
| 2429 | // extra complexity during GC. We rely on subsequent cache |
| 2430 | // usages (EnsureCapacity) to do this. |
| 2431 | map_cache->ElementsRemoved(existing_elements - used_elements); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2432 | MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache); |
| 2433 | MarkObject(map_cache, map_cache_markbit); |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2434 | } |
| 2435 | } |
| 2436 | } |
| 2437 | // Move to next element in the list. |
| 2438 | raw_context = context->get(Context::NEXT_CONTEXT_LINK); |
| 2439 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2440 | ProcessMarkingDeque(); |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2441 | } |
| 2442 | |
| 2443 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2444 | void MarkCompactCollector::ReattachInitialMaps() { |
| 2445 | HeapObjectIterator map_iterator(heap()->map_space()); |
| 2446 | for (HeapObject* obj = map_iterator.Next(); |
| 2447 | obj != NULL; |
| 2448 | obj = map_iterator.Next()) { |
| 2449 | if (obj->IsFreeSpace()) continue; |
| 2450 | Map* map = Map::cast(obj); |
| 2451 | |
| 2452 | STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
| 2453 | if (map->instance_type() < FIRST_JS_RECEIVER_TYPE) continue; |
| 2454 | |
| 2455 | if (map->attached_to_shared_function_info()) { |
| 2456 | JSFunction::cast(map->constructor())->shared()->AttachInitialMap(map); |
| 2457 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2458 | } |
| 2459 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2460 | |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2461 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2462 | void MarkCompactCollector::ClearNonLiveTransitions() { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2463 | HeapObjectIterator map_iterator(heap()->map_space()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2464 | // Iterate over the map space, setting map transitions that go from |
| 2465 | // a marked map to an unmarked map to null transitions. At the same time, |
| 2466 | // set all the prototype fields of maps back to their original value, |
| 2467 | // dropping the back pointers temporarily stored in the prototype field. |
| 2468 | // Setting the prototype field requires following the linked list of |
| 2469 | // back pointers, reversing them all at once. This allows us to find |
| 2470 | // those maps with map transitions that need to be nulled, and only |
| 2471 | // scan the descriptor arrays of those maps, not all maps. |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 2472 | // All of these actions are carried out only on maps of JSObjects |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2473 | // and related subtypes. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2474 | for (HeapObject* obj = map_iterator.Next(); |
| 2475 | obj != NULL; obj = map_iterator.Next()) { |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 2476 | Map* map = reinterpret_cast<Map*>(obj); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2477 | MarkBit map_mark = Marking::MarkBitFrom(map); |
| 2478 | if (map->IsFreeSpace()) continue; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2479 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2480 | ASSERT(map->IsMap()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2481 | // Only JSObject and subtypes have map transitions and back pointers. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2482 | STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE); |
| 2483 | if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; |
Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 2484 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2485 | if (map_mark.Get() && |
| 2486 | map->attached_to_shared_function_info()) { |
Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 2487 | // This map is used for inobject slack tracking and has been detached |
| 2488 | // from SharedFunctionInfo during the mark phase. |
| 2489 | // Since it survived the GC, reattach it now. |
| 2490 | map->unchecked_constructor()->unchecked_shared()->AttachInitialMap(map); |
| 2491 | } |
| 2492 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2493 | ClearNonLivePrototypeTransitions(map); |
| 2494 | ClearNonLiveMapTransitions(map, map_mark); |
| 2495 | } |
| 2496 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2497 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2498 | |
| 2499 | void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) { |
| 2500 | int number_of_transitions = map->NumberOfProtoTransitions(); |
| 2501 | FixedArray* prototype_transitions = map->prototype_transitions(); |
| 2502 | |
| 2503 | int new_number_of_transitions = 0; |
| 2504 | const int header = Map::kProtoTransitionHeaderSize; |
| 2505 | const int proto_offset = header + Map::kProtoTransitionPrototypeOffset; |
| 2506 | const int map_offset = header + Map::kProtoTransitionMapOffset; |
| 2507 | const int step = Map::kProtoTransitionElementsPerEntry; |
| 2508 | for (int i = 0; i < number_of_transitions; i++) { |
| 2509 | Object* prototype = prototype_transitions->get(proto_offset + i * step); |
| 2510 | Object* cached_map = prototype_transitions->get(map_offset + i * step); |
| 2511 | if (IsMarked(prototype) && IsMarked(cached_map)) { |
| 2512 | int proto_index = proto_offset + new_number_of_transitions * step; |
| 2513 | int map_index = map_offset + new_number_of_transitions * step; |
| 2514 | if (new_number_of_transitions != i) { |
| 2515 | prototype_transitions->set_unchecked( |
| 2516 | heap_, |
| 2517 | proto_index, |
| 2518 | prototype, |
| 2519 | UPDATE_WRITE_BARRIER); |
| 2520 | prototype_transitions->set_unchecked( |
| 2521 | heap_, |
| 2522 | map_index, |
| 2523 | cached_map, |
| 2524 | SKIP_WRITE_BARRIER); |
Steve Block | 053d10c | 2011-06-13 19:13:29 +0100 | [diff] [blame] | 2525 | } |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2526 | Object** slot = |
| 2527 | HeapObject::RawField(prototype_transitions, |
| 2528 | FixedArray::OffsetOfElementAt(proto_index)); |
| 2529 | RecordSlot(slot, slot, prototype); |
| 2530 | new_number_of_transitions++; |
| 2531 | } |
| 2532 | } |
Steve Block | 053d10c | 2011-06-13 19:13:29 +0100 | [diff] [blame] | 2533 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2534 | if (new_number_of_transitions != number_of_transitions) { |
| 2535 | map->SetNumberOfProtoTransitions(new_number_of_transitions); |
| 2536 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2537 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2538 | // Fill slots that became free with undefined value. |
| 2539 | for (int i = new_number_of_transitions * step; |
| 2540 | i < number_of_transitions * step; |
| 2541 | i++) { |
| 2542 | prototype_transitions->set_undefined(heap_, header + i); |
| 2543 | } |
| 2544 | } |
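|      | // The loop above compacts the surviving (prototype, cached map) pairs |
|      | // towards the front of the array and clears the freed tail to undefined. |
|      | // Only the prototype slot is recorded for the evacuation fix-up; the |
|      | // cached map is stored with SKIP_WRITE_BARRIER, presumably because maps |
|      | // live in map space, which is never compacted. |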
| 2545 | |
| 2546 | |
| 2547 | void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, |
| 2548 | MarkBit map_mark) { |
| 2549 | // Follow the chain of back pointers to find the prototype. |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 2550 | Object* real_prototype = map; |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2551 | while (real_prototype->IsMap()) { |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 2552 | real_prototype = Map::cast(real_prototype)->prototype(); |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2553 | ASSERT(real_prototype->IsHeapObject()); |
| 2554 | } |
| 2555 | |
| 2556 | // Follow the back pointers, setting them to the real prototype, and |
| 2557 | // clear map transitions when necessary. |
| 2558 | Map* current = map; |
| 2559 | bool current_is_alive = map_mark.Get(); |
| 2560 | bool on_dead_path = !current_is_alive; |
| 2561 | while (current->IsMap()) { |
| 2562 | Object* next = current->prototype(); |
| 2563 | // There should never be a dead map above a live map. |
| 2564 | ASSERT(on_dead_path || current_is_alive); |
| 2565 | |
| 2566 | // A live map above a dead map indicates a dead transition. This test will |
| 2567 | // always be false on the first iteration. |
| 2568 | if (on_dead_path && current_is_alive) { |
| 2569 | on_dead_path = false; |
| 2570 | current->ClearNonLiveTransitions(heap(), real_prototype); |
Steve Block | 053d10c | 2011-06-13 19:13:29 +0100 | [diff] [blame] | 2571 | } |
| 2572 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2573 | Object** slot = HeapObject::RawField(current, Map::kPrototypeOffset); |
| 2574 | *slot = real_prototype; |
| 2575 | if (current_is_alive) RecordSlot(slot, slot, real_prototype); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2576 | |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2577 | current = reinterpret_cast<Map*>(next); |
| 2578 | current_is_alive = Marking::MarkBitFrom(current).Get(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2579 | } |
| 2580 | } |
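|      | // Note that prototype() is overloaded here: during marking, |
|      | // Map::CreateBackPointers made every transition target point back at |
|      | // its transition source through the prototype field, so the walk above |
|      | // climbs the transition tree until it falls off the root map onto the |
|      | // real prototype object. |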
| 2581 | |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2582 | |
| 2583 | void MarkCompactCollector::ProcessWeakMaps() { |
| 2584 | Object* weak_map_obj = encountered_weak_maps(); |
| 2585 | while (weak_map_obj != Smi::FromInt(0)) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2586 | ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2587 | JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2588 | ObjectHashTable* table = ObjectHashTable::cast(weak_map->table()); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2589 | for (int i = 0; i < table->Capacity(); i++) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2590 | if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2591 | Object* value = table->get(table->EntryToValueIndex(i)); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2592 | StaticMarkingVisitor::VisitPointer(heap(), &value); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2593 | table->set_unchecked(heap(), |
| 2594 | table->EntryToValueIndex(i), |
| 2595 | value, |
| 2596 | UPDATE_WRITE_BARRIER); |
| 2597 | } |
| 2598 | } |
| 2599 | weak_map_obj = weak_map->next(); |
| 2600 | } |
| 2601 | } |
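|      | // Weak map entries have ephemeron semantics: a value is marked only if |
|      | // its key is marked. Since marking a value can make further keys |
|      | // reachable, EmptyMarkingDeque calls this repeatedly until a fix-point |
|      | // is reached; ClearWeakMaps below then drops entries whose keys died. |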
| 2602 | |
| 2603 | |
| 2604 | void MarkCompactCollector::ClearWeakMaps() { |
| 2605 | Object* weak_map_obj = encountered_weak_maps(); |
| 2606 | while (weak_map_obj != Smi::FromInt(0)) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2607 | ASSERT(MarkCompactCollector::IsMarked(HeapObject::cast(weak_map_obj))); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2608 | JSWeakMap* weak_map = reinterpret_cast<JSWeakMap*>(weak_map_obj); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2609 | ObjectHashTable* table = ObjectHashTable::cast(weak_map->table()); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2610 | for (int i = 0; i < table->Capacity(); i++) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2611 | if (!MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) { |
| 2612 | table->RemoveEntry(i); |
Ben Murdoch | 69a99ed | 2011-11-30 16:03:39 +0000 | [diff] [blame] | 2613 | } |
| 2614 | } |
| 2615 | weak_map_obj = weak_map->next(); |
| 2616 | weak_map->set_next(Smi::FromInt(0)); |
| 2617 | } |
| 2618 | set_encountered_weak_maps(Smi::FromInt(0)); |
| 2619 | } |
| 2620 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2621 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2622 | // We scavenge new space simultaneously with sweeping. This is done in two |
| 2623 | // passes. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2624 | // |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2625 | // The first pass migrates all alive objects from one semispace to another or |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2626 | // promotes them to old space. The forwarding address is written directly |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2627 | // into the first word of the object without any encoding. If the object is |
| 2628 | // dead we write NULL as a forwarding address. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2629 | // |
| 2630 | // The second pass updates pointers to new space in all spaces. It is possible |
| 2631 | // to encounter pointers to dead new space objects during traversal of pointers |
| 2632 | // to new space. We should clear them to avoid encountering them during |
| 2633 | // the next pointer iteration. This is an issue if the store buffer |
| 2634 | // overflows and we have to scan the entire old space, including dead |
| 2635 | // objects, looking for pointers to new space. |
| 2636 | void MarkCompactCollector::MigrateObject(Address dst, |
| 2637 | Address src, |
| 2638 | int size, |
| 2639 | AllocationSpace dest) { |
| 2640 | HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst)); |
| 2641 | if (dest == OLD_POINTER_SPACE || dest == LO_SPACE) { |
| 2642 | Address src_slot = src; |
| 2643 | Address dst_slot = dst; |
| 2644 | ASSERT(IsAligned(size, kPointerSize)); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2645 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2646 | for (int remaining = size / kPointerSize; remaining > 0; remaining--) { |
| 2647 | Object* value = Memory::Object_at(src_slot); |
| 2648 | |
| 2649 | Memory::Object_at(dst_slot) = value; |
| 2650 | |
| 2651 | if (heap_->InNewSpace(value)) { |
| 2652 | heap_->store_buffer()->Mark(dst_slot); |
| 2653 | } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { |
| 2654 | SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2655 | &migration_slots_buffer_, |
| 2656 | reinterpret_cast<Object**>(dst_slot), |
| 2657 | SlotsBuffer::IGNORE_OVERFLOW); |
| 2658 | } |
| 2659 | |
| 2660 | src_slot += kPointerSize; |
| 2661 | dst_slot += kPointerSize; |
| 2662 | } |
| 2663 | |
| 2664 | if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) { |
| 2665 | Address code_entry_slot = dst + JSFunction::kCodeEntryOffset; |
| 2666 | Address code_entry = Memory::Address_at(code_entry_slot); |
| 2667 | |
| 2668 | if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { |
| 2669 | SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2670 | &migration_slots_buffer_, |
| 2671 | SlotsBuffer::CODE_ENTRY_SLOT, |
| 2672 | code_entry_slot, |
| 2673 | SlotsBuffer::IGNORE_OVERFLOW); |
| 2674 | } |
| 2675 | } |
| 2676 | } else if (dest == CODE_SPACE) { |
| 2677 | PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); |
| 2678 | heap()->MoveBlock(dst, src, size); |
| 2679 | SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 2680 | &migration_slots_buffer_, |
| 2681 | SlotsBuffer::RELOCATED_CODE_OBJECT, |
| 2682 | dst, |
| 2683 | SlotsBuffer::IGNORE_OVERFLOW); |
| 2684 | Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src); |
| 2685 | } else { |
| 2686 | ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE); |
| 2687 | heap()->MoveBlock(dst, src, size); |
| 2688 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2689 | Memory::Address_at(src) = dst; |
| 2690 | } |
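|      | // The final store above plants the forwarding address in the first |
|      | // (map) word of the evacuated object, as the comment before this |
|      | // function describes; UpdatePointer and PointersUpdatingVisitor below |
|      | // read it back when fixing up references. |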
| 2691 | |
| 2692 | |
| 2693 | // Visitor for updating pointers from live objects in old spaces to new space. |
| 2694 | // It does not expect to encounter pointers to dead objects. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2695 | class PointersUpdatingVisitor: public ObjectVisitor { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2696 | public: |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2697 | explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2698 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2699 | void VisitPointer(Object** p) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2700 | UpdatePointer(p); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2701 | } |
| 2702 | |
| 2703 | void VisitPointers(Object** start, Object** end) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2704 | for (Object** p = start; p < end; p++) UpdatePointer(p); |
| 2705 | } |
| 2706 | |
| 2707 | void VisitEmbeddedPointer(RelocInfo* rinfo) { |
| 2708 | ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); |
| 2709 | Object* target = rinfo->target_object(); |
| 2710 | VisitPointer(&target); |
| 2711 | rinfo->set_target_object(target); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2712 | } |
| 2713 | |
| 2714 | void VisitCodeTarget(RelocInfo* rinfo) { |
| 2715 | ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode())); |
| 2716 | Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address()); |
| 2717 | VisitPointer(&target); |
| 2718 | rinfo->set_target_address(Code::cast(target)->instruction_start()); |
| 2719 | } |
| 2720 | |
| 2721 | void VisitDebugTarget(RelocInfo* rinfo) { |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2722 | ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && |
| 2723 | rinfo->IsPatchedReturnSequence()) || |
| 2724 | (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && |
| 2725 | rinfo->IsPatchedDebugBreakSlotSequence())); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2726 | Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); |
| 2727 | VisitPointer(&target); |
| 2728 | rinfo->set_call_address(Code::cast(target)->instruction_start()); |
| 2729 | } |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 2730 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2731 | static inline void UpdateSlot(Heap* heap, Object** slot) { |
| 2732 | Object* obj = *slot; |
| 2733 | |
| 2734 | if (!obj->IsHeapObject()) return; |
| 2735 | |
| 2736 | HeapObject* heap_obj = HeapObject::cast(obj); |
| 2737 | |
| 2738 | MapWord map_word = heap_obj->map_word(); |
| 2739 | if (map_word.IsForwardingAddress()) { |
| 2740 | ASSERT(heap->InFromSpace(heap_obj) || |
| 2741 | MarkCompactCollector::IsOnEvacuationCandidate(heap_obj)); |
| 2742 | HeapObject* target = map_word.ToForwardingAddress(); |
| 2743 | *slot = target; |
| 2744 | ASSERT(!heap->InFromSpace(target) && |
| 2745 | !MarkCompactCollector::IsOnEvacuationCandidate(target)); |
| 2746 | } |
| 2747 | } |
| 2748 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2749 | private: |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2750 | inline void UpdatePointer(Object** p) { |
| 2751 | UpdateSlot(heap_, p); |
| 2752 | } |
| 2753 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 2754 | Heap* heap_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2755 | }; |
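|      | // A sketch of how this visitor is typically driven (illustrative only; |
|      | // the real call sites appear later in this file): |
|      | //   PointersUpdatingVisitor updating_visitor(heap()); |
|      | //   heap()->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
|      | // Every visited slot goes through UpdateSlot, which chases a forwarding |
|      | // map word when one is present and otherwise leaves the slot alone. |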
| 2756 | |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2757 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2758 | static void UpdatePointer(HeapObject** p, HeapObject* object) { |
| 2759 | ASSERT(*p == object); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2760 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2761 | Address old_addr = object->address(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2762 | |
| 2763 | Address new_addr = Memory::Address_at(old_addr); |
| 2764 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2765 | // The new space sweep will overwrite the map word of dead objects |
| 2766 | // with NULL. In this case we do not need to transfer this entry to |
| 2767 | // the store buffer which we are rebuilding. |
| 2768 | if (new_addr != NULL) { |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2769 | *p = HeapObject::FromAddress(new_addr); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2770 | } else { |
| 2771 | // We have to zap this pointer, because the store buffer may overflow later, |
| 2772 | // and then we would have to scan the entire heap, and we don't want to |
| 2773 | // find spurious new space pointers in the old space. |
| 2774 | *p = reinterpret_cast<HeapObject*>(Smi::FromInt(0)); |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2775 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2776 | } |
| 2777 | |
| 2778 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2779 | static String* UpdateReferenceInExternalStringTableEntry(Heap* heap, |
| 2780 | Object** p) { |
| 2781 | MapWord map_word = HeapObject::cast(*p)->map_word(); |
| 2782 | |
| 2783 | if (map_word.IsForwardingAddress()) { |
| 2784 | return String::cast(map_word.ToForwardingAddress()); |
| 2785 | } |
| 2786 | |
| 2787 | return String::cast(*p); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2788 | } |
| 2789 | |
| 2790 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2791 | bool MarkCompactCollector::TryPromoteObject(HeapObject* object, |
| 2792 | int object_size) { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2793 | Object* result; |
| 2794 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2795 | if (object_size > Page::kMaxNonCodeHeapObjectSize) { |
John Reck | 5913587 | 2010-11-02 12:39:01 -0700 | [diff] [blame] | 2796 | MaybeObject* maybe_result = |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2797 | heap()->lo_space()->AllocateRaw(object_size, NOT_EXECUTABLE); |
John Reck | 5913587 | 2010-11-02 12:39:01 -0700 | [diff] [blame] | 2798 | if (maybe_result->ToObject(&result)) { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2799 | HeapObject* target = HeapObject::cast(result); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2800 | MigrateObject(target->address(), |
| 2801 | object->address(), |
| 2802 | object_size, |
| 2803 | LO_SPACE); |
| 2804 | heap()->mark_compact_collector()->tracer()-> |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 2805 | increment_promoted_objects_size(object_size); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2806 | return true; |
| 2807 | } |
| 2808 | } else { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2809 | OldSpace* target_space = heap()->TargetSpace(object); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2810 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2811 | ASSERT(target_space == heap()->old_pointer_space() || |
| 2812 | target_space == heap()->old_data_space()); |
John Reck | 5913587 | 2010-11-02 12:39:01 -0700 | [diff] [blame] | 2813 | MaybeObject* maybe_result = target_space->AllocateRaw(object_size); |
| 2814 | if (maybe_result->ToObject(&result)) { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2815 | HeapObject* target = HeapObject::cast(result); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2816 | MigrateObject(target->address(), |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2817 | object->address(), |
| 2818 | object_size, |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2819 | target_space->identity()); |
| 2820 | heap()->mark_compact_collector()->tracer()-> |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 2821 | increment_promoted_objects_size(object_size); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2822 | return true; |
| 2823 | } |
| 2824 | } |
| 2825 | |
| 2826 | return false; |
| 2827 | } |
| 2828 | |
| 2829 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2830 | void MarkCompactCollector::EvacuateNewSpace() { |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 2831 | // There are soft limits in the allocation code, designed to trigger a |
| 2832 | // mark-sweep collection by failing allocations. But since we are already |
| 2833 | // in a mark-sweep collection, there is no sense in trying to trigger one. |
| 2834 | AlwaysAllocateScope scope; |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2835 | heap()->CheckNewSpaceExpansionCriteria(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2836 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2837 | NewSpace* new_space = heap()->new_space(); |
| 2838 | |
| 2839 | // Store allocation range before flipping semispaces. |
| 2840 | Address from_bottom = new_space->bottom(); |
| 2841 | Address from_top = new_space->top(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2842 | |
| 2843 | // Flip the semispaces. After flipping, to space is empty and from space |
| 2844 | // holds the live objects. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2845 | new_space->Flip(); |
| 2846 | new_space->ResetAllocationInfo(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2847 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2848 | int survivors_size = 0; |
| 2849 | |
| 2850 | // First pass: traverse all objects in the inactive semispace, remove marks, |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2851 | // migrate live objects and write forwarding addresses. This stage puts |
| 2852 | // new entries in the store buffer and may cause some pages to be marked |
| 2853 | // scan-on-scavenge. |
| 2854 | SemiSpaceIterator from_it(from_bottom, from_top); |
| 2855 | for (HeapObject* object = from_it.Next(); |
| 2856 | object != NULL; |
| 2857 | object = from_it.Next()) { |
| 2858 | MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 2859 | if (mark_bit.Get()) { |
| 2860 | mark_bit.Clear(); |
| 2861 | // Don't bother decrementing live bytes count. We'll discard the |
| 2862 | // entire page at the end. |
| 2863 | int size = object->Size(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2864 | survivors_size += size; |
| 2865 | |
| 2866 | // Aggressively promote young survivors to the old space. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2867 | if (TryPromoteObject(object, size)) { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2868 | continue; |
| 2869 | } |
| 2870 | |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2871 | // Promotion failed. Just migrate the object to the other semispace.
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2872 | MaybeObject* allocation = new_space->AllocateRaw(size); |
| 2873 | if (allocation->IsFailure()) { |
| 2874 | if (!new_space->AddFreshPage()) { |
| 2875 | // Shouldn't happen. We are sweeping linearly, and to-space |
| 2876 | // has the same number of pages as from-space, so there is |
| 2877 | // always room. |
| 2878 | UNREACHABLE(); |
| 2879 | } |
| 2880 | allocation = new_space->AllocateRaw(size); |
| 2881 | ASSERT(!allocation->IsFailure()); |
| 2882 | } |
| 2883 | Object* target = allocation->ToObjectUnchecked(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2884 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2885 | MigrateObject(HeapObject::cast(target)->address(), |
| 2886 | object->address(), |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 2887 | size, |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2888 | NEW_SPACE); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2889 | } else { |
Steve Block | 1e0659c | 2011-05-24 12:43:12 +0100 | [diff] [blame] | 2890 | // Process the dead object before we write a NULL into its header. |
| 2891 | LiveObjectList::ProcessNonLive(object); |
| 2892 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2893 | // Mark dead objects in the new space by writing NULL into their map field.
| 2894 | Memory::Address_at(object->address()) = NULL; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2895 | } |
| 2896 | } |
| 2897 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2898 | heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 2899 | new_space->set_age_mark(new_space->top()); |
| 2900 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 2901 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 2902 | |
| 2903 | void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
| 2904 | AlwaysAllocateScope always_allocate; |
| 2905 | PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 2906 | ASSERT(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 2907 | MarkBit::CellType* cells = p->markbits()->cells(); |
| 2908 | p->MarkSweptPrecisely(); |
| 2909 | |
| 2910 | int last_cell_index = |
| 2911 | Bitmap::IndexToCell( |
| 2912 | Bitmap::CellAlignIndex( |
| 2913 | p->AddressToMarkbitIndex(p->area_end()))); |
| 2914 | |
| 2915 | Address cell_base = p->area_start(); |
| 2916 | int cell_index = Bitmap::IndexToCell( |
| 2917 | Bitmap::CellAlignIndex( |
| 2918 | p->AddressToMarkbitIndex(cell_base))); |
| 2919 | |
| 2920 | int offsets[16]; |
| 2921 | |
| 2922 | for (; |
| 2923 | cell_index < last_cell_index; |
| 2924 | cell_index++, cell_base += 32 * kPointerSize) { |
| 2925 | ASSERT((unsigned)cell_index == |
| 2926 | Bitmap::IndexToCell( |
| 2927 | Bitmap::CellAlignIndex( |
| 2928 | p->AddressToMarkbitIndex(cell_base)))); |
| 2929 | if (cells[cell_index] == 0) continue; |
| 2930 | |
| 2931 | int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 2932 | for (int i = 0; i < live_objects; i++) { |
| 2933 | Address object_addr = cell_base + offsets[i] * kPointerSize; |
| 2934 | HeapObject* object = HeapObject::FromAddress(object_addr); |
| 2935 | ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 2936 | |
| 2937 | int size = object->Size(); |
| 2938 | |
| 2939 | MaybeObject* target = space->AllocateRaw(size); |
| 2940 | if (target->IsFailure()) { |
| 2941 | // OS refused to give us memory. |
| 2942 | V8::FatalProcessOutOfMemory("Evacuation"); |
| 2943 | return; |
| 2944 | } |
| 2945 | |
| 2946 | Object* target_object = target->ToObjectUnchecked(); |
| 2947 | |
| 2948 | MigrateObject(HeapObject::cast(target_object)->address(), |
| 2949 | object_addr, |
| 2950 | size, |
| 2951 | space->identity()); |
| 2952 | ASSERT(object->map_word().IsForwardingAddress()); |
| 2953 | } |
| 2954 | |
| 2955 | // Clear marking bits for current cell. |
| 2956 | cells[cell_index] = 0; |
| 2957 | } |
| 2958 | p->ResetLiveBytes(); |
| 2959 | } |
| 2960 | |
| 2961 | |
| 2962 | void MarkCompactCollector::EvacuatePages() { |
| 2963 | int npages = evacuation_candidates_.length(); |
| 2964 | for (int i = 0; i < npages; i++) { |
| 2965 | Page* p = evacuation_candidates_[i]; |
| 2966 | ASSERT(p->IsEvacuationCandidate() || |
| 2967 | p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 2968 | if (p->IsEvacuationCandidate()) { |
| 2969 | // During compaction we might have to request a new page.
| 2970 | // Check that the space still has room for that.
| 2971 | if (static_cast<PagedSpace*>(p->owner())->CanExpand()) { |
| 2972 | EvacuateLiveObjectsFromPage(p); |
| 2973 | } else { |
| 2974 | // Without room for expansion, evacuation is not guaranteed to succeed.
| 2975 | // Pessimistically abandon the unevacuated pages.
| 2976 | for (int j = i; j < npages; j++) { |
| 2977 | Page* page = evacuation_candidates_[j]; |
| 2978 | slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); |
| 2979 | page->ClearEvacuationCandidate(); |
| 2980 | page->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 2981 | } |
| 2982 | return; |
| 2983 | } |
| 2984 | } |
| 2985 | } |
| 2986 | } |
| 2987 | |
| 2988 | |
| 2989 | class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 2990 | public: |
| 2991 | virtual Object* RetainAs(Object* object) { |
| 2992 | if (object->IsHeapObject()) { |
| 2993 | HeapObject* heap_object = HeapObject::cast(object); |
| 2994 | MapWord map_word = heap_object->map_word(); |
| 2995 | if (map_word.IsForwardingAddress()) { |
| 2996 | return map_word.ToForwardingAddress(); |
| 2997 | } |
| 2998 | } |
| 2999 | return object; |
| 3000 | } |
| 3001 | }; |
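
// Note (informal): MigrateObject leaves a forwarding address in the old
// object's map word (see the ASSERT in EvacuateLiveObjectsFromPage), which
// is exactly what RetainAs reads above; weak references to evacuated
// objects are thereby redirected to their new locations.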
| 3002 | |
| 3003 | |
| 3004 | static inline void UpdateSlot(ObjectVisitor* v, |
| 3005 | SlotsBuffer::SlotType slot_type, |
| 3006 | Address addr) { |
| 3007 | switch (slot_type) { |
| 3008 | case SlotsBuffer::CODE_TARGET_SLOT: { |
| 3009 | RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); |
| 3010 | rinfo.Visit(v); |
| 3011 | break; |
| 3012 | } |
| 3013 | case SlotsBuffer::CODE_ENTRY_SLOT: { |
| 3014 | v->VisitCodeEntry(addr); |
| 3015 | break; |
| 3016 | } |
| 3017 | case SlotsBuffer::RELOCATED_CODE_OBJECT: { |
| 3018 | HeapObject* obj = HeapObject::FromAddress(addr); |
| 3019 | Code::cast(obj)->CodeIterateBody(v); |
| 3020 | break; |
| 3021 | } |
| 3022 | case SlotsBuffer::DEBUG_TARGET_SLOT: { |
| 3023 | RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL); |
| 3024 | if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(v); |
| 3025 | break; |
| 3026 | } |
| 3027 | case SlotsBuffer::JS_RETURN_SLOT: { |
| 3028 | RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL); |
| 3029 | if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(v); |
| 3030 | break; |
| 3031 | } |
| 3032 | case SlotsBuffer::EMBEDDED_OBJECT_SLOT: { |
| 3033 | RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL); |
| 3034 | rinfo.Visit(v); |
| 3035 | break; |
| 3036 | } |
| 3037 | default: |
| 3038 | UNREACHABLE(); |
| 3039 | break; |
| 3040 | } |
| 3041 | } |
| 3042 | |
| 3043 | |
| 3044 | enum SweepingMode { |
| 3045 | SWEEP_ONLY, |
| 3046 | SWEEP_AND_VISIT_LIVE_OBJECTS |
| 3047 | }; |
| 3048 | |
| 3049 | |
| 3050 | enum SkipListRebuildingMode { |
| 3051 | REBUILD_SKIP_LIST, |
| 3052 | IGNORE_SKIP_LIST |
| 3053 | }; |
| 3054 | |
| 3055 | |
| 3056 | // Sweep a space precisely. After this has been done, the space can
| 3057 | // be iterated precisely, hitting only the live objects. Code space
| 3058 | // is always swept precisely because we want to be able to iterate
| 3059 | // over it. Map space is swept precisely because it is not compacted.
| 3060 | // Slots in live objects pointing into evacuation candidates are updated
| 3061 | // if requested.
| 3062 | template<SweepingMode sweeping_mode, SkipListRebuildingMode skip_list_mode> |
| 3063 | static void SweepPrecisely(PagedSpace* space, |
| 3064 | Page* p, |
| 3065 | ObjectVisitor* v) { |
| 3066 | ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3067 | ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 3068 | space->identity() == CODE_SPACE); |
| 3069 | ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3070 | |
| 3071 | MarkBit::CellType* cells = p->markbits()->cells(); |
| 3072 | p->MarkSweptPrecisely(); |
| 3073 | |
| 3074 | int last_cell_index = |
| 3075 | Bitmap::IndexToCell( |
| 3076 | Bitmap::CellAlignIndex( |
| 3077 | p->AddressToMarkbitIndex(p->area_end()))); |
| 3078 | |
| 3079 | Address free_start = p->area_start(); |
| 3080 | int cell_index = |
| 3081 | Bitmap::IndexToCell( |
| 3082 | Bitmap::CellAlignIndex( |
| 3083 | p->AddressToMarkbitIndex(free_start))); |
| 3084 | |
| 3085 | ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| 3086 | Address object_address = free_start; |
| 3087 | int offsets[16]; |
| 3088 | |
| 3089 | SkipList* skip_list = p->skip_list(); |
| 3090 | int curr_region = -1; |
| 3091 | if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) { |
| 3092 | skip_list->Clear(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3093 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3094 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3095 | for (; |
| 3096 | cell_index < last_cell_index; |
| 3097 | cell_index++, object_address += 32 * kPointerSize) { |
| 3098 | ASSERT((unsigned)cell_index == |
| 3099 | Bitmap::IndexToCell( |
| 3100 | Bitmap::CellAlignIndex( |
| 3101 | p->AddressToMarkbitIndex(object_address)))); |
| 3102 | int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); |
| 3103 | int live_index = 0; |
| 3104 | for ( ; live_objects != 0; live_objects--) { |
| 3105 | Address free_end = object_address + offsets[live_index++] * kPointerSize; |
| 3106 | if (free_end != free_start) { |
| 3107 | space->Free(free_start, static_cast<int>(free_end - free_start)); |
| 3108 | } |
| 3109 | HeapObject* live_object = HeapObject::FromAddress(free_end); |
| 3110 | ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object))); |
| 3111 | Map* map = live_object->map(); |
| 3112 | int size = live_object->SizeFromMap(map); |
| 3113 | if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) { |
| 3114 | live_object->IterateBody(map->instance_type(), size, v); |
| 3115 | } |
| 3116 | if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) { |
| 3117 | int new_region_start = |
| 3118 | SkipList::RegionNumber(free_end); |
| 3119 | int new_region_end = |
| 3120 | SkipList::RegionNumber(free_end + size - kPointerSize); |
| 3121 | if (new_region_start != curr_region || |
| 3122 | new_region_end != curr_region) { |
| 3123 | skip_list->AddObject(free_end, size); |
| 3124 | curr_region = new_region_end; |
| 3125 | } |
| 3126 | } |
| 3127 | free_start = free_end + size; |
| 3128 | } |
| 3129 | // Clear marking bits for current cell. |
| 3130 | cells[cell_index] = 0; |
| 3131 | } |
| 3132 | if (free_start != p->area_end()) { |
| 3133 | space->Free(free_start, static_cast<int>(p->area_end() - free_start)); |
| 3134 | } |
| 3135 | p->ResetLiveBytes(); |
| 3136 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3137 | |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 3138 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3139 | static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) { |
| 3140 | Page* p = Page::FromAddress(code->address()); |
| 3141 | |
| 3142 | if (p->IsEvacuationCandidate() || |
| 3143 | p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3144 | return false; |
| 3145 | } |
| 3146 | |
| 3147 | Address code_start = code->address(); |
| 3148 | Address code_end = code_start + code->Size(); |
| 3149 | |
| 3150 | uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start); |
| 3151 | uint32_t end_index = |
| 3152 | MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize); |
| 3153 | |
| 3154 | Bitmap* b = p->markbits(); |
| 3155 | |
| 3156 | MarkBit start_mark_bit = b->MarkBitFromIndex(start_index); |
| 3157 | MarkBit end_mark_bit = b->MarkBitFromIndex(end_index); |
| 3158 | |
| 3159 | MarkBit::CellType* start_cell = start_mark_bit.cell(); |
| 3160 | MarkBit::CellType* end_cell = end_mark_bit.cell(); |
| 3161 | |
| 3162 | if (value) { |
| 3163 | MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1); |
| 3164 | MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1; |
| 3165 | |
| 3166 | if (start_cell == end_cell) { |
| 3167 | *start_cell |= start_mask & end_mask; |
| 3168 | } else { |
| 3169 | *start_cell |= start_mask; |
| 3170 | for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) { |
| 3171 | *cell = ~0; |
| 3172 | } |
| 3173 | *end_cell |= end_mask; |
| 3174 | } |
| 3175 | } else { |
| 3176 | for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
| 3177 | *cell = 0; |
| 3178 | } |
| 3179 | } |
| 3180 | |
| 3181 | return true; |
| 3182 | } |
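
// Illustrative sketch of the mask construction above, using hypothetical
// bit positions: with the start bit at index 2 and the end bit at index 6
// of the same cell, start_mask = ~(0x04 - 1) keeps bits >= 2, end_mask =
// (0x40 << 1) - 1 = 0x7f keeps bits <= 6, and their intersection sets
// exactly bits 2 through 6.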
| 3183 | |
| 3184 | |
| 3185 | static bool IsOnInvalidatedCodeObject(Address addr) { |
| 3186 | // We did not record any slots in large objects, thus
| 3187 | // we can safely go to the page from the slot address.
| 3188 | Page* p = Page::FromAddress(addr); |
| 3189 | |
| 3190 | // First check the owner's identity because the old pointer and old data
| 3191 | // spaces are swept lazily and might still have non-zero mark-bits on
| 3192 | // some pages.
| 3193 | if (p->owner()->identity() != CODE_SPACE) return false; |
| 3194 | |
| 3195 | // In the code space, non-zero mark bits occur only on evacuation candidates
| 3196 | // (where we record no slots anyway) and under invalidated code objects.
| 3197 | MarkBit mark_bit = |
| 3198 | p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr)); |
| 3199 | |
| 3200 | return mark_bit.Get(); |
| 3201 | } |
| 3202 | |
| 3203 | |
| 3204 | void MarkCompactCollector::InvalidateCode(Code* code) { |
| 3205 | if (heap_->incremental_marking()->IsCompacting() && |
| 3206 | !ShouldSkipEvacuationSlotRecording(code)) { |
| 3207 | ASSERT(compacting_); |
| 3208 | |
| 3209 | // If the object is white, then no slots have been recorded on it yet.
| 3210 | MarkBit mark_bit = Marking::MarkBitFrom(code); |
| 3211 | if (Marking::IsWhite(mark_bit)) return; |
| 3212 | |
| 3213 | invalidated_code_.Add(code); |
| 3214 | } |
| 3215 | } |
| 3216 | |
| 3217 | |
| 3218 | bool MarkCompactCollector::MarkInvalidatedCode() { |
| 3219 | bool code_marked = false; |
| 3220 | |
| 3221 | int length = invalidated_code_.length(); |
| 3222 | for (int i = 0; i < length; i++) { |
| 3223 | Code* code = invalidated_code_[i]; |
| 3224 | |
| 3225 | if (SetMarkBitsUnderInvalidatedCode(code, true)) { |
| 3226 | code_marked = true; |
| 3227 | } |
| 3228 | } |
| 3229 | |
| 3230 | return code_marked; |
| 3231 | } |
| 3232 | |
| 3233 | |
| 3234 | void MarkCompactCollector::RemoveDeadInvalidatedCode() { |
| 3235 | int length = invalidated_code_.length(); |
| 3236 | for (int i = 0; i < length; i++) { |
| 3237 | if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL; |
| 3238 | } |
| 3239 | } |
| 3240 | |
| 3241 | |
| 3242 | void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) { |
| 3243 | int length = invalidated_code_.length(); |
| 3244 | for (int i = 0; i < length; i++) { |
| 3245 | Code* code = invalidated_code_[i]; |
| 3246 | if (code != NULL) { |
| 3247 | code->Iterate(visitor); |
| 3248 | SetMarkBitsUnderInvalidatedCode(code, false); |
| 3249 | } |
| 3250 | } |
| 3251 | invalidated_code_.Rewind(0); |
| 3252 | } |
| 3253 | |
| 3254 | |
| 3255 | void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 3256 | bool code_slots_filtering_required; |
| 3257 | { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); |
| 3258 | code_slots_filtering_required = MarkInvalidatedCode(); |
| 3259 | |
| 3260 | EvacuateNewSpace(); |
| 3261 | } |
| 3262 |
| 3264 | { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES); |
| 3265 | EvacuatePages(); |
| 3266 | } |
| 3267 | |
| 3268 | // Second pass: find pointers to new space and update them. |
| 3269 | PointersUpdatingVisitor updating_visitor(heap()); |
| 3270 | |
| 3271 | { GCTracer::Scope gc_scope(tracer_, |
| 3272 | GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); |
| 3273 | // Update pointers in the to space.
| 3274 | SemiSpaceIterator to_it(heap()->new_space()->bottom(), |
| 3275 | heap()->new_space()->top()); |
| 3276 | for (HeapObject* object = to_it.Next(); |
| 3277 | object != NULL; |
| 3278 | object = to_it.Next()) { |
| 3279 | Map* map = object->map(); |
| 3280 | object->IterateBody(map->instance_type(), |
| 3281 | object->SizeFromMap(map), |
| 3282 | &updating_visitor); |
| 3283 | } |
| 3284 | } |
| 3285 | |
| 3286 | { GCTracer::Scope gc_scope(tracer_, |
| 3287 | GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS); |
| 3288 | // Update roots. |
| 3289 | heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
| 3290 | LiveObjectList::IterateElements(&updating_visitor); |
| 3291 | } |
| 3292 | |
| 3293 | { GCTracer::Scope gc_scope(tracer_, |
| 3294 | GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS); |
| 3295 | StoreBufferRebuildScope scope(heap_, |
| 3296 | heap_->store_buffer(), |
| 3297 | &Heap::ScavengeStoreBufferCallback); |
| 3298 | heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
| 3299 | } |
| 3300 | |
| 3301 | { GCTracer::Scope gc_scope(tracer_, |
| 3302 | GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED); |
| 3303 | SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
| 3304 | migration_slots_buffer_, |
| 3305 | code_slots_filtering_required); |
| 3306 | if (FLAG_trace_fragmentation) { |
| 3307 | PrintF(" migration slots buffer: %d\n", |
| 3308 | SlotsBuffer::SizeOfChain(migration_slots_buffer_)); |
| 3309 | } |
| 3310 | |
| 3311 | if (compacting_ && was_marked_incrementally_) { |
| 3312 | // It's difficult to filter out slots recorded for large objects. |
| 3313 | LargeObjectIterator it(heap_->lo_space()); |
| 3314 | for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3315 | // LargeObjectSpace is not swept yet, thus we have to skip
| 3316 | // dead objects explicitly.
| 3317 | if (!IsMarked(obj)) continue; |
| 3318 | |
| 3319 | Page* p = Page::FromAddress(obj->address()); |
| 3320 | if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3321 | obj->Iterate(&updating_visitor); |
| 3322 | p->ClearFlag(Page::RESCAN_ON_EVACUATION); |
| 3323 | } |
| 3324 | } |
| 3325 | } |
| 3326 | } |
| 3327 | |
| 3328 | int npages = evacuation_candidates_.length(); |
| 3329 | { GCTracer::Scope gc_scope( |
| 3330 | tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); |
| 3331 | for (int i = 0; i < npages; i++) { |
| 3332 | Page* p = evacuation_candidates_[i]; |
| 3333 | ASSERT(p->IsEvacuationCandidate() || |
| 3334 | p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3335 | |
| 3336 | if (p->IsEvacuationCandidate()) { |
| 3337 | SlotsBuffer::UpdateSlotsRecordedIn(heap_, |
| 3338 | p->slots_buffer(), |
| 3339 | code_slots_filtering_required); |
| 3340 | if (FLAG_trace_fragmentation) { |
| 3341 | PrintF(" page %p slots buffer: %d\n", |
| 3342 | reinterpret_cast<void*>(p), |
| 3343 | SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 3344 | } |
| 3345 | |
| 3346 | // Important: clear the skip list only after the roots have been updated,
| 3347 | // because root iteration traverses the stack and might have to find code
| 3348 | // objects from a not-yet-updated pc pointing into an evacuation candidate.
| 3349 | SkipList* list = p->skip_list(); |
| 3350 | if (list != NULL) list->Clear(); |
| 3351 | } else { |
| 3352 | if (FLAG_gc_verbose) { |
| 3353 | PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3354 | reinterpret_cast<intptr_t>(p)); |
| 3355 | } |
| 3356 | PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3357 | p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3358 | |
| 3359 | switch (space->identity()) { |
| 3360 | case OLD_DATA_SPACE: |
| 3361 | SweepConservatively(space, p); |
| 3362 | break; |
| 3363 | case OLD_POINTER_SPACE: |
| 3364 | SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( |
| 3365 | space, p, &updating_visitor); |
| 3366 | break; |
| 3367 | case CODE_SPACE: |
| 3368 | SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( |
| 3369 | space, p, &updating_visitor); |
| 3370 | break; |
| 3371 | default: |
| 3372 | UNREACHABLE(); |
| 3373 | break; |
| 3374 | } |
| 3375 | } |
| 3376 | } |
| 3377 | } |
| 3378 | |
| 3379 | GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3380 | |
| 3381 | // Update pointers from cells. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3382 | HeapObjectIterator cell_iterator(heap_->cell_space()); |
| 3383 | for (HeapObject* cell = cell_iterator.Next(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3384 | cell != NULL; |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3385 | cell = cell_iterator.Next()) { |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3386 | if (cell->IsJSGlobalPropertyCell()) { |
| 3387 | Address value_address = |
| 3388 | reinterpret_cast<Address>(cell) + |
| 3389 | (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); |
| 3390 | updating_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); |
| 3391 | } |
| 3392 | } |
| 3393 | |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 3394 | // Update the pointer to the global contexts list.
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3395 | updating_visitor.VisitPointer(heap_->global_contexts_list_address()); |
| 3396 | |
| 3397 | heap_->symbol_table()->Iterate(&updating_visitor); |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 3398 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3399 | // Update pointers from external string table. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3400 | heap_->UpdateReferencesInExternalStringTable( |
| 3401 | &UpdateReferenceInExternalStringTableEntry); |
Ben Murdoch | e0cee9b | 2011-05-25 10:26:03 +0100 | [diff] [blame] | 3402 | |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 3403 | if (!FLAG_watch_ic_patching) { |
| 3404 | // Update JSFunction pointers from the runtime profiler. |
| 3405 | heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( |
| 3406 | &updating_visitor); |
| 3407 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3408 | |
| 3409 | EvacuationWeakObjectRetainer evacuation_object_retainer; |
| 3410 | heap()->ProcessWeakReferences(&evacuation_object_retainer); |
| 3411 | |
| 3412 | // Visit invalidated code (we ignored all slots on it) and clear mark-bits |
| 3413 | // under it. |
| 3414 | ProcessInvalidatedCode(&updating_visitor); |
| 3415 | |
| 3416 | #ifdef DEBUG |
| 3417 | if (FLAG_verify_heap) { |
| 3418 | VerifyEvacuation(heap_); |
| 3419 | } |
| 3420 | #endif |
| 3421 | |
| 3422 | slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); |
| 3423 | ASSERT(migration_slots_buffer_ == NULL); |
| 3424 | for (int i = 0; i < npages; i++) { |
| 3425 | Page* p = evacuation_candidates_[i]; |
| 3426 | if (!p->IsEvacuationCandidate()) continue; |
| 3427 | PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3428 | space->Free(p->area_start(), p->area_size()); |
| 3429 | p->set_scan_on_scavenge(false); |
| 3430 | slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); |
Ben Murdoch | 5d4cdbf | 2012-04-11 10:23:59 +0100 | [diff] [blame^] | 3431 | p->ResetLiveBytes(); |
| 3432 | space->ReleasePage(p); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3433 | } |
| 3434 | evacuation_candidates_.Rewind(0); |
| 3435 | compacting_ = false; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3436 | } |
| 3437 | |
| 3438 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3439 | static const int kStartTableEntriesPerLine = 5; |
| 3440 | static const int kStartTableLines = 171; |
| 3441 | static const int kStartTableInvalidLine = 127; |
| 3442 | static const int kStartTableUnusedEntry = 126; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3443 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3444 | #define _ kStartTableUnusedEntry |
| 3445 | #define X kStartTableInvalidLine |
| 3446 | // Mark-bit to object start offset table. |
| 3447 | // |
| 3448 | // Each line is indexed by a byte of mark bits. The first number on the
| 3449 | // line gives the number of live object starts encoded by that byte; the
| 3450 | // remaining numbers give the offsets (in words) of those object starts.
| 3451 | //
| 3452 | // Since objects are at least two words in size, there are no entries for
| 3453 | // bytes with two consecutive 1 bits. Every byte value above 170 contains
| 3454 | // at least two consecutive 1 bits, which is why the table has 171 lines.
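//
// A worked example, reading values straight off the table below: byte 149
// is 10010101 in binary, i.e. mark bits at positions 0, 2, 4 and 7, and
// its line reads "4, 0, 2, 4, 7": four object starts, at word offsets
// 0, 2, 4 and 7 within the byte's 8-word region.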
| 3455 | char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = { |
| 3456 | 0, _, _, _, _, // 0 |
| 3457 | 1, 0, _, _, _, // 1 |
| 3458 | 1, 1, _, _, _, // 2 |
| 3459 | X, _, _, _, _, // 3 |
| 3460 | 1, 2, _, _, _, // 4 |
| 3461 | 2, 0, 2, _, _, // 5 |
| 3462 | X, _, _, _, _, // 6 |
| 3463 | X, _, _, _, _, // 7 |
| 3464 | 1, 3, _, _, _, // 8 |
| 3465 | 2, 0, 3, _, _, // 9 |
| 3466 | 2, 1, 3, _, _, // 10 |
| 3467 | X, _, _, _, _, // 11 |
| 3468 | X, _, _, _, _, // 12 |
| 3469 | X, _, _, _, _, // 13 |
| 3470 | X, _, _, _, _, // 14 |
| 3471 | X, _, _, _, _, // 15 |
| 3472 | 1, 4, _, _, _, // 16 |
| 3473 | 2, 0, 4, _, _, // 17 |
| 3474 | 2, 1, 4, _, _, // 18 |
| 3475 | X, _, _, _, _, // 19 |
| 3476 | 2, 2, 4, _, _, // 20 |
| 3477 | 3, 0, 2, 4, _, // 21 |
| 3478 | X, _, _, _, _, // 22 |
| 3479 | X, _, _, _, _, // 23 |
| 3480 | X, _, _, _, _, // 24 |
| 3481 | X, _, _, _, _, // 25 |
| 3482 | X, _, _, _, _, // 26 |
| 3483 | X, _, _, _, _, // 27 |
| 3484 | X, _, _, _, _, // 28 |
| 3485 | X, _, _, _, _, // 29 |
| 3486 | X, _, _, _, _, // 30 |
| 3487 | X, _, _, _, _, // 31 |
| 3488 | 1, 5, _, _, _, // 32 |
| 3489 | 2, 0, 5, _, _, // 33 |
| 3490 | 2, 1, 5, _, _, // 34 |
| 3491 | X, _, _, _, _, // 35 |
| 3492 | 2, 2, 5, _, _, // 36 |
| 3493 | 3, 0, 2, 5, _, // 37 |
| 3494 | X, _, _, _, _, // 38 |
| 3495 | X, _, _, _, _, // 39 |
| 3496 | 2, 3, 5, _, _, // 40 |
| 3497 | 3, 0, 3, 5, _, // 41 |
| 3498 | 3, 1, 3, 5, _, // 42 |
| 3499 | X, _, _, _, _, // 43 |
| 3500 | X, _, _, _, _, // 44 |
| 3501 | X, _, _, _, _, // 45 |
| 3502 | X, _, _, _, _, // 46 |
| 3503 | X, _, _, _, _, // 47 |
| 3504 | X, _, _, _, _, // 48 |
| 3505 | X, _, _, _, _, // 49 |
| 3506 | X, _, _, _, _, // 50 |
| 3507 | X, _, _, _, _, // 51 |
| 3508 | X, _, _, _, _, // 52 |
| 3509 | X, _, _, _, _, // 53 |
| 3510 | X, _, _, _, _, // 54 |
| 3511 | X, _, _, _, _, // 55 |
| 3512 | X, _, _, _, _, // 56 |
| 3513 | X, _, _, _, _, // 57 |
| 3514 | X, _, _, _, _, // 58 |
| 3515 | X, _, _, _, _, // 59 |
| 3516 | X, _, _, _, _, // 60 |
| 3517 | X, _, _, _, _, // 61 |
| 3518 | X, _, _, _, _, // 62 |
| 3519 | X, _, _, _, _, // 63 |
| 3520 | 1, 6, _, _, _, // 64 |
| 3521 | 2, 0, 6, _, _, // 65 |
| 3522 | 2, 1, 6, _, _, // 66 |
| 3523 | X, _, _, _, _, // 67 |
| 3524 | 2, 2, 6, _, _, // 68 |
| 3525 | 3, 0, 2, 6, _, // 69 |
| 3526 | X, _, _, _, _, // 70 |
| 3527 | X, _, _, _, _, // 71 |
| 3528 | 2, 3, 6, _, _, // 72 |
| 3529 | 3, 0, 3, 6, _, // 73 |
| 3530 | 3, 1, 3, 6, _, // 74 |
| 3531 | X, _, _, _, _, // 75 |
| 3532 | X, _, _, _, _, // 76 |
| 3533 | X, _, _, _, _, // 77 |
| 3534 | X, _, _, _, _, // 78 |
| 3535 | X, _, _, _, _, // 79 |
| 3536 | 2, 4, 6, _, _, // 80 |
| 3537 | 3, 0, 4, 6, _, // 81 |
| 3538 | 3, 1, 4, 6, _, // 82 |
| 3539 | X, _, _, _, _, // 83 |
| 3540 | 3, 2, 4, 6, _, // 84 |
| 3541 | 4, 0, 2, 4, 6, // 85 |
| 3542 | X, _, _, _, _, // 86 |
| 3543 | X, _, _, _, _, // 87 |
| 3544 | X, _, _, _, _, // 88 |
| 3545 | X, _, _, _, _, // 89 |
| 3546 | X, _, _, _, _, // 90 |
| 3547 | X, _, _, _, _, // 91 |
| 3548 | X, _, _, _, _, // 92 |
| 3549 | X, _, _, _, _, // 93 |
| 3550 | X, _, _, _, _, // 94 |
| 3551 | X, _, _, _, _, // 95 |
| 3552 | X, _, _, _, _, // 96 |
| 3553 | X, _, _, _, _, // 97 |
| 3554 | X, _, _, _, _, // 98 |
| 3555 | X, _, _, _, _, // 99 |
| 3556 | X, _, _, _, _, // 100 |
| 3557 | X, _, _, _, _, // 101 |
| 3558 | X, _, _, _, _, // 102 |
| 3559 | X, _, _, _, _, // 103 |
| 3560 | X, _, _, _, _, // 104 |
| 3561 | X, _, _, _, _, // 105 |
| 3562 | X, _, _, _, _, // 106 |
| 3563 | X, _, _, _, _, // 107 |
| 3564 | X, _, _, _, _, // 108 |
| 3565 | X, _, _, _, _, // 109 |
| 3566 | X, _, _, _, _, // 110 |
| 3567 | X, _, _, _, _, // 111 |
| 3568 | X, _, _, _, _, // 112 |
| 3569 | X, _, _, _, _, // 113 |
| 3570 | X, _, _, _, _, // 114 |
| 3571 | X, _, _, _, _, // 115 |
| 3572 | X, _, _, _, _, // 116 |
| 3573 | X, _, _, _, _, // 117 |
| 3574 | X, _, _, _, _, // 118 |
| 3575 | X, _, _, _, _, // 119 |
| 3576 | X, _, _, _, _, // 120 |
| 3577 | X, _, _, _, _, // 121 |
| 3578 | X, _, _, _, _, // 122 |
| 3579 | X, _, _, _, _, // 123 |
| 3580 | X, _, _, _, _, // 124 |
| 3581 | X, _, _, _, _, // 125 |
| 3582 | X, _, _, _, _, // 126 |
| 3583 | X, _, _, _, _, // 127 |
| 3584 | 1, 7, _, _, _, // 128 |
| 3585 | 2, 0, 7, _, _, // 129 |
| 3586 | 2, 1, 7, _, _, // 130 |
| 3587 | X, _, _, _, _, // 131 |
| 3588 | 2, 2, 7, _, _, // 132 |
| 3589 | 3, 0, 2, 7, _, // 133 |
| 3590 | X, _, _, _, _, // 134 |
| 3591 | X, _, _, _, _, // 135 |
| 3592 | 2, 3, 7, _, _, // 136 |
| 3593 | 3, 0, 3, 7, _, // 137 |
| 3594 | 3, 1, 3, 7, _, // 138 |
| 3595 | X, _, _, _, _, // 139 |
| 3596 | X, _, _, _, _, // 140 |
| 3597 | X, _, _, _, _, // 141 |
| 3598 | X, _, _, _, _, // 142 |
| 3599 | X, _, _, _, _, // 143 |
| 3600 | 2, 4, 7, _, _, // 144 |
| 3601 | 3, 0, 4, 7, _, // 145 |
| 3602 | 3, 1, 4, 7, _, // 146 |
| 3603 | X, _, _, _, _, // 147 |
| 3604 | 3, 2, 4, 7, _, // 148 |
| 3605 | 4, 0, 2, 4, 7, // 149 |
| 3606 | X, _, _, _, _, // 150 |
| 3607 | X, _, _, _, _, // 151 |
| 3608 | X, _, _, _, _, // 152 |
| 3609 | X, _, _, _, _, // 153 |
| 3610 | X, _, _, _, _, // 154 |
| 3611 | X, _, _, _, _, // 155 |
| 3612 | X, _, _, _, _, // 156 |
| 3613 | X, _, _, _, _, // 157 |
| 3614 | X, _, _, _, _, // 158 |
| 3615 | X, _, _, _, _, // 159 |
| 3616 | 2, 5, 7, _, _, // 160 |
| 3617 | 3, 0, 5, 7, _, // 161 |
| 3618 | 3, 1, 5, 7, _, // 162 |
| 3619 | X, _, _, _, _, // 163 |
| 3620 | 3, 2, 5, 7, _, // 164 |
| 3621 | 4, 0, 2, 5, 7, // 165 |
| 3622 | X, _, _, _, _, // 166 |
| 3623 | X, _, _, _, _, // 167 |
| 3624 | 3, 3, 5, 7, _, // 168 |
| 3625 | 4, 0, 3, 5, 7, // 169 |
| 3626 | 4, 1, 3, 5, 7 // 170 |
| 3627 | }; |
| 3628 | #undef _ |
| 3629 | #undef X |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3630 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3631 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3632 | // Takes a word of mark bits. Returns the number of objects that start in the
| 3633 | // range. Puts the word offsets of the object starts in the supplied array.
| 3634 | static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) { |
| 3635 | int objects = 0; |
| 3636 | int offset = 0; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3637 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3638 | // No consecutive 1 bits. |
| 3639 | ASSERT((mark_bits & 0x180) != 0x180); |
| 3640 | ASSERT((mark_bits & 0x18000) != 0x18000); |
| 3641 | ASSERT((mark_bits & 0x1800000) != 0x1800000); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3642 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3643 | while (mark_bits != 0) { |
| 3644 | int byte = (mark_bits & 0xff); |
| 3645 | mark_bits >>= 8; |
| 3646 | if (byte != 0) { |
| 3647 | ASSERT(byte < kStartTableLines); // No consecutive 1 bits. |
| 3648 | char* table = kStartTable + byte * kStartTableEntriesPerLine; |
| 3649 | int objects_in_these_8_words = table[0]; |
| 3650 | ASSERT(objects_in_these_8_words != kStartTableInvalidLine); |
| 3651 | ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine); |
| 3652 | for (int i = 0; i < objects_in_these_8_words; i++) { |
| 3653 | starts[objects++] = offset + table[1 + i]; |
| 3654 | } |
| 3655 | } |
| 3656 | offset += 8; |
| 3657 | } |
| 3658 | return objects; |
| 3659 | } |
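
// Illustrative usage, with values that follow from kStartTable above:
// MarkWordToObjectStarts(0x95, starts) returns 4 with starts = {0, 2, 4, 7}
// (0x95 is 10010101 in binary), and MarkWordToObjectStarts(0x9500, starts)
// returns 4 with starts = {8, 10, 12, 15}, since the second byte of the
// word covers word offsets 8 through 15.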
| 3660 | |
| 3661 | |
| 3662 | static inline Address DigestFreeStart(Address approximate_free_start, |
| 3663 | uint32_t free_start_cell) { |
| 3664 | ASSERT(free_start_cell != 0); |
| 3665 | |
| 3666 | // No consecutive 1 bits. |
| 3667 | ASSERT((free_start_cell & (free_start_cell << 1)) == 0); |
| 3668 | |
| 3669 | int offsets[16]; |
| 3670 | uint32_t cell = free_start_cell; |
| 3671 | int offset_of_last_live; |
| 3672 | if ((cell & 0x80000000u) != 0) { |
| 3673 | // This case would overflow below. |
| 3674 | offset_of_last_live = 31; |
| 3675 | } else { |
| 3676 | // Remove all but one bit, the most significant. This is an optimization |
| 3677 | // that may or may not be worthwhile. |
| 3678 | cell |= cell >> 16; |
| 3679 | cell |= cell >> 8; |
| 3680 | cell |= cell >> 4; |
| 3681 | cell |= cell >> 2; |
| 3682 | cell |= cell >> 1; |
| 3683 | cell = (cell + 1) >> 1; |
| 3684 | int live_objects = MarkWordToObjectStarts(cell, offsets); |
| 3685 | ASSERT(live_objects == 1); |
| 3686 | offset_of_last_live = offsets[live_objects - 1]; |
| 3687 | } |
| 3688 | Address last_live_start = |
| 3689 | approximate_free_start + offset_of_last_live * kPointerSize; |
| 3690 | HeapObject* last_live = HeapObject::FromAddress(last_live_start); |
| 3691 | Address free_start = last_live_start + last_live->Size(); |
| 3692 | return free_start; |
| 3693 | } |
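
// Illustrative sketch of the bit smearing above, for a hypothetical cell
// value: cell = 0x24 (binary 100100) smears to 0x3f (binary 111111), and
// (0x3f + 1) >> 1 = 0x20 is the most significant set bit alone, so the
// MarkWordToObjectStarts call then reports exactly one object start.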
| 3694 | |
| 3695 | |
| 3696 | static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { |
| 3697 | ASSERT(cell != 0); |
| 3698 | |
| 3699 | // No consecutive 1 bits. |
| 3700 | ASSERT((cell & (cell << 1)) == 0); |
| 3701 | |
| 3702 | int offsets[16]; |
| 3703 | if (cell == 0x80000000u) { // Avoid overflow below. |
| 3704 | return block_address + 31 * kPointerSize; |
| 3705 | } |
| 3706 | uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
| 3707 | ASSERT((first_set_bit & cell) == first_set_bit); |
| 3708 | int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
| 3709 | ASSERT(live_objects == 1); |
| 3710 | USE(live_objects); |
| 3711 | return block_address + offsets[0] * kPointerSize; |
| 3712 | } |
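
// Illustrative sketch of the bit trick above, for a hypothetical cell
// value: cell = 0x24 gives cell ^ (cell - 1) = 0x07, and
// ((0x07 + 1) >> 1) = 0x04 is the lowest set bit alone, i.e. the first
// live object in this cell starts at word offset 2.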
| 3713 | |
| 3714 | |
| 3715 | // Sweeps a space conservatively. After this has been done, the larger free
| 3716 | // spaces have been put on the free list and the smaller ones have been
| 3717 | // ignored and left untouched. A free space is always either ignored or put
| 3718 | // on the free list, never split up into two parts. This is important
| 3719 | // because it means that any FreeSpace maps left actually describe a region
| 3720 | // of memory that can be ignored when scanning. Dead objects other than free
| 3721 | // spaces will not contain the free space map.
| 3722 | intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { |
| 3723 | ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3724 | MarkBit::CellType* cells = p->markbits()->cells(); |
| 3725 | p->MarkSweptConservatively(); |
| 3726 | |
| 3727 | int last_cell_index = |
| 3728 | Bitmap::IndexToCell( |
| 3729 | Bitmap::CellAlignIndex( |
| 3730 | p->AddressToMarkbitIndex(p->area_end()))); |
| 3731 | |
| 3732 | int cell_index = |
| 3733 | Bitmap::IndexToCell( |
| 3734 | Bitmap::CellAlignIndex( |
| 3735 | p->AddressToMarkbitIndex(p->area_start()))); |
| 3736 | |
| 3737 | intptr_t freed_bytes = 0; |
| 3738 | |
| 3739 | // This is the start of the 32-word block that we are currently looking at.
| 3740 | Address block_address = p->area_start(); |
| 3741 | |
| 3742 | // Skip over all the dead objects at the start of the page and mark them free. |
| 3743 | for (; |
| 3744 | cell_index < last_cell_index; |
| 3745 | cell_index++, block_address += 32 * kPointerSize) { |
| 3746 | if (cells[cell_index] != 0) break; |
| 3747 | } |
| 3748 | size_t size = block_address - p->area_start(); |
| 3749 | if (cell_index == last_cell_index) { |
| 3750 | freed_bytes += static_cast<int>(space->Free(p->area_start(), |
| 3751 | static_cast<int>(size))); |
| 3752 | ASSERT_EQ(0, p->LiveBytes()); |
| 3753 | return freed_bytes; |
| 3754 | } |
| 3755 | // Grow the size of the start-of-page free space a little to get up to the |
| 3756 | // first live object. |
| 3757 | Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 3758 | // Free the first free space. |
| 3759 | size = free_end - p->area_start(); |
| 3760 | freed_bytes += space->Free(p->area_start(), |
| 3761 | static_cast<int>(size)); |
| 3762 | // The start of the current free area is represented in undigested form by |
| 3763 | // the address of the last 32-word section that contained a live object and |
| 3764 | // the marking bitmap for that cell, which describes where the live object |
| 3765 | // started. Unless we find a large free space in the bitmap we will not |
| 3766 | // digest this pair into a real address. We start the iteration here at the |
| 3767 | // first word in the marking bit map that indicates a live object. |
| 3768 | Address free_start = block_address; |
| 3769 | uint32_t free_start_cell = cells[cell_index]; |
| 3770 | |
| 3771 | for ( ; |
| 3772 | cell_index < last_cell_index; |
| 3773 | cell_index++, block_address += 32 * kPointerSize) { |
| 3774 | ASSERT((unsigned)cell_index == |
| 3775 | Bitmap::IndexToCell( |
| 3776 | Bitmap::CellAlignIndex( |
| 3777 | p->AddressToMarkbitIndex(block_address)))); |
| 3778 | uint32_t cell = cells[cell_index]; |
| 3779 | if (cell != 0) { |
| 3780 | // We have a live object. Check approximately whether more than 32 words
| 3781 | // have passed since the last live object.
| 3782 | if (block_address - free_start > 32 * kPointerSize) { |
| 3783 | free_start = DigestFreeStart(free_start, free_start_cell); |
| 3784 | if (block_address - free_start > 32 * kPointerSize) { |
| 3785 | // Now that we know the exact start of the free space, it still looks
| 3786 | // large enough to be worth bothering with, so we need to find the
| 3787 | // start of the first live object at the end of the free space.
| 3789 | free_end = StartOfLiveObject(block_address, cell); |
| 3790 | freed_bytes += space->Free(free_start, |
| 3791 | static_cast<int>(free_end - free_start)); |
| 3792 | } |
| 3793 | } |
| 3794 | // Update our undigested record of where the current free area started. |
| 3795 | free_start = block_address; |
| 3796 | free_start_cell = cell; |
| 3797 | // Clear marking bits for current cell. |
| 3798 | cells[cell_index] = 0; |
| 3799 | } |
| 3800 | } |
| 3801 | |
| 3802 | // Handle the free space at the end of the page. |
| 3803 | if (block_address - free_start > 32 * kPointerSize) { |
| 3804 | free_start = DigestFreeStart(free_start, free_start_cell); |
| 3805 | freed_bytes += space->Free(free_start, |
| 3806 | static_cast<int>(block_address - free_start)); |
| 3807 | } |
| 3808 | |
| 3809 | p->ResetLiveBytes(); |
| 3810 | return freed_bytes; |
| 3811 | } |
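
// Informal note on the 32-word threshold used above: with hypothetical
// 4-byte pointers that is 128 bytes, so only gaps of dead objects larger
// than roughly 128 bytes reach the free list; smaller gaps are left
// untouched, as described in the comment before SweepConservatively.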
| 3812 | |
| 3813 | |
| 3814 | void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 3815 | space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3816 | sweeper == LAZY_CONSERVATIVE); |
| 3817 | |
| 3818 | space->ClearStats(); |
| 3819 | |
| 3820 | PageIterator it(space); |
| 3821 | |
| 3822 | intptr_t freed_bytes = 0; |
| 3823 | int pages_swept = 0; |
| 3824 | intptr_t newspace_size = space->heap()->new_space()->Size(); |
| 3825 | bool lazy_sweeping_active = false; |
| 3826 | bool unused_page_present = false; |
| 3827 | |
| 3828 | intptr_t old_space_size = heap()->PromotedSpaceSize(); |
| 3829 | intptr_t space_left = |
| 3830 | Min(heap()->OldGenPromotionLimit(old_space_size), |
| 3831 | heap()->OldGenAllocationLimit(old_space_size)) - old_space_size; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3832 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3833 | while (it.has_next()) { |
| 3834 | Page* p = it.next(); |
| 3835 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3836 | // Clear the sweeping flags to indicate that the mark bits are still intact.
| 3837 | p->ClearSweptPrecisely(); |
| 3838 | p->ClearSweptConservatively(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3839 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3840 | if (p->IsEvacuationCandidate()) { |
| 3841 | ASSERT(evacuation_candidates_.length() > 0); |
| 3842 | continue; |
| 3843 | } |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 3844 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3845 | if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3846 | // Will be processed in EvacuateNewSpaceAndCandidates. |
| 3847 | continue; |
| 3848 | } |
| 3849 | |
| 3850 | // One unused page is kept; further unused pages are released, not swept.
| 3851 | if (p->LiveBytes() == 0) { |
| 3852 | if (unused_page_present) { |
| 3853 | if (FLAG_gc_verbose) { |
| 3854 | PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n", |
| 3855 | reinterpret_cast<intptr_t>(p)); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3856 | } |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 3857 | // Adjust unswept free bytes because releasing a page expects said |
| 3858 | // counter to be accurate for unswept pages. |
| 3859 | space->IncreaseUnsweptFreeBytes(p); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3860 | space->ReleasePage(p); |
| 3861 | continue; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3862 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3863 | unused_page_present = true; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3864 | } |
| 3865 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3866 | if (lazy_sweeping_active) { |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3867 | if (FLAG_gc_verbose) { |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3868 | PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", |
| 3869 | reinterpret_cast<intptr_t>(p)); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3870 | } |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 3871 | space->IncreaseUnsweptFreeBytes(p); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3872 | continue; |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3873 | } |
| 3874 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3875 | switch (sweeper) { |
| 3876 | case CONSERVATIVE: { |
| 3877 | if (FLAG_gc_verbose) { |
| 3878 | PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
| 3879 | reinterpret_cast<intptr_t>(p)); |
| 3880 | } |
| 3881 | SweepConservatively(space, p); |
| 3882 | pages_swept++; |
| 3883 | break; |
| 3884 | } |
| 3885 | case LAZY_CONSERVATIVE: { |
| 3886 | if (FLAG_gc_verbose) { |
| 3887 | PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", |
| 3888 | reinterpret_cast<intptr_t>(p)); |
| 3889 | } |
| 3890 | freed_bytes += SweepConservatively(space, p); |
| 3891 | pages_swept++; |
| 3892 | if (space_left + freed_bytes > newspace_size) { |
| 3893 | space->SetPagesToSweep(p->next_page()); |
| 3894 | lazy_sweeping_active = true; |
| 3895 | } else { |
| 3896 | if (FLAG_gc_verbose) { |
| 3897 | PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n", |
| 3898 | freed_bytes); |
| 3899 | } |
| 3900 | } |
| 3901 | break; |
| 3902 | } |
| 3903 | case PRECISE: { |
| 3904 | if (FLAG_gc_verbose) { |
| 3905 | PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| 3906 | reinterpret_cast<intptr_t>(p)); |
| 3907 | } |
| 3908 | if (space->identity() == CODE_SPACE) { |
| 3909 | SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); |
| 3910 | } else { |
| 3911 | SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); |
| 3912 | } |
| 3913 | pages_swept++; |
| 3914 | break; |
| 3915 | } |
| 3916 | default: { |
| 3917 | UNREACHABLE(); |
| 3918 | } |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3919 | } |
| 3920 | } |
| 3921 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3922 | if (FLAG_gc_verbose) { |
| 3923 | PrintF("SweepSpace: %s (%d pages swept)\n", |
| 3924 | AllocationSpaceName(space->identity()), |
| 3925 | pages_swept); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3926 | } |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3927 | |
| 3928 | // Give pages that are queued to be freed back to the OS. |
| 3929 | heap()->FreeQueuedChunks(); |
| 3930 | } |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3931 | |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3932 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3933 | void MarkCompactCollector::SweepSpaces() { |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 3934 | GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3935 | #ifdef DEBUG |
| 3936 | state_ = SWEEP_SPACES; |
| 3937 | #endif |
| 3938 | SweeperType how_to_sweep = |
| 3939 | FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
Ben Murdoch | c7cc028 | 2012-03-05 14:35:55 +0000 | [diff] [blame] | 3940 | if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3941 | if (sweep_precisely_) how_to_sweep = PRECISE; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3942 | // Noncompacting collections simply sweep the spaces to clear the mark |
| 3943 | // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 3944 | // the map space last because freeing non-live maps overwrites them and |
| 3945 | // the other spaces rely on possibly non-live maps to get the sizes for |
| 3946 | // non-live objects. |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3947 | SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 3948 | SweepSpace(heap()->old_data_space(), how_to_sweep); |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 3949 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3950 | RemoveDeadInvalidatedCode(); |
| 3951 | SweepSpace(heap()->code_space(), PRECISE); |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 3952 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3953 | SweepSpace(heap()->cell_space(), PRECISE); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3954 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3955 | EvacuateNewSpaceAndCandidates(); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3956 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3957 | // ClearNonLiveTransitions depends on precise sweeping of the map space to
| 3958 | // detect whether an unmarked map became dead in this collection or in one
| 3959 | // of the previous ones.
| 3960 | SweepSpace(heap()->map_space(), PRECISE); |
Leon Clarke | e46be81 | 2010-01-19 14:06:41 +0000 | [diff] [blame] | 3961 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3962 | // Deallocate unmarked objects and clear the mark bits of marked objects.
| 3963 | heap_->lo_space()->FreeUnmarkedObjects(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 3964 | } |
| 3965 | |
| 3966 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 3967 | void MarkCompactCollector::EnableCodeFlushing(bool enable) { |
| 3968 | if (enable) { |
| 3969 | if (code_flusher_ != NULL) return; |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 3970 | code_flusher_ = new CodeFlusher(heap()->isolate()); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 3971 | } else { |
| 3972 | if (code_flusher_ == NULL) return; |
| 3973 | delete code_flusher_; |
| 3974 | code_flusher_ = NULL; |
| 3975 | } |
| 3976 | } |
| 3977 | |
| 3978 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3979 | // TODO(1466): ReportDeleteIfNeeded is currently not called.
| 3980 | // Our profiling tools do not expect intersections between
| 3981 | // code objects. We should either re-enable it or change our tools.
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 3982 | void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, |
| 3983 | Isolate* isolate) { |
Ben Murdoch | b8e0da2 | 2011-05-16 14:20:40 +0100 | [diff] [blame] | 3984 | #ifdef ENABLE_GDB_JIT_INTERFACE |
| 3985 | if (obj->IsCode()) { |
| 3986 | GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj)); |
| 3987 | } |
| 3988 | #endif |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 3989 | if (obj->IsCode()) { |
Ben Murdoch | 8b112d2 | 2011-06-08 16:22:53 +0100 | [diff] [blame] | 3990 | PROFILE(isolate, CodeDeleteEvent(obj->address())); |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 3991 | } |
Leon Clarke | d91b9f7 | 2010-01-27 17:25:45 +0000 | [diff] [blame] | 3992 | } |
| 3993 | |
Iain Merrick | 7568138 | 2010-08-19 15:07:18 +0100 | [diff] [blame] | 3994 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 3995 | void MarkCompactCollector::Initialize() { |
| 3996 | StaticMarkingVisitor::Initialize(); |
Shimeng (Simon) Wang | 8a31eba | 2010-12-06 19:01:33 -0800 | [diff] [blame] | 3997 | } |
| 3998 | |
| 3999 | |
Ben Murdoch | 592a9fc | 2012-03-05 11:04:45 +0000 | [diff] [blame] | 4000 | bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) { |
| 4001 | return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES; |
| 4002 | } |
| 4003 | |
| 4004 | |
| 4005 | bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator, |
| 4006 | SlotsBuffer** buffer_address, |
| 4007 | SlotType type, |
| 4008 | Address addr, |
| 4009 | AdditionMode mode) { |
| 4010 | SlotsBuffer* buffer = *buffer_address; |
| 4011 | if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) { |
| 4012 | if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { |
| 4013 | allocator->DeallocateChain(buffer_address); |
| 4014 | return false; |
| 4015 | } |
| 4016 | buffer = allocator->AllocateBuffer(buffer); |
| 4017 | *buffer_address = buffer; |
| 4018 | } |
| 4019 | ASSERT(buffer->HasSpaceForTypedSlot()); |
| 4020 | buffer->Add(reinterpret_cast<ObjectSlot>(type)); |
| 4021 | buffer->Add(reinterpret_cast<ObjectSlot>(addr)); |
| 4022 | return true; |
| 4023 | } |
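
// Note on the encoding used above: a typed slot occupies two consecutive
// buffer entries -- first the SlotType cast to an ObjectSlot (a small
// integer below NUMBER_OF_SLOT_TYPES, which is how IsTypedSlot tells it
// apart from a real slot address), then the address itself. UpdateSlots
// below relies on this pairing when it advances slot_idx an extra step.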
| 4024 | |
| 4025 | |
| 4026 | static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { |
| 4027 | if (RelocInfo::IsCodeTarget(rmode)) { |
| 4028 | return SlotsBuffer::CODE_TARGET_SLOT; |
| 4029 | } else if (RelocInfo::IsEmbeddedObject(rmode)) { |
| 4030 | return SlotsBuffer::EMBEDDED_OBJECT_SLOT; |
| 4031 | } else if (RelocInfo::IsDebugBreakSlot(rmode)) { |
| 4032 | return SlotsBuffer::DEBUG_TARGET_SLOT; |
| 4033 | } else if (RelocInfo::IsJSReturn(rmode)) { |
| 4034 | return SlotsBuffer::JS_RETURN_SLOT; |
| 4035 | } |
| 4036 | UNREACHABLE(); |
| 4037 | return SlotsBuffer::NUMBER_OF_SLOT_TYPES; |
| 4038 | } |
| 4039 | |
| 4040 | |
| 4041 | void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) { |
| 4042 | Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); |
| 4043 | if (target_page->IsEvacuationCandidate() && |
| 4044 | (rinfo->host() == NULL || |
| 4045 | !ShouldSkipEvacuationSlotRecording(rinfo->host()))) { |
| 4046 | if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 4047 | target_page->slots_buffer_address(), |
| 4048 | SlotTypeForRMode(rinfo->rmode()), |
| 4049 | rinfo->pc(), |
| 4050 | SlotsBuffer::FAIL_ON_OVERFLOW)) { |
| 4051 | EvictEvacuationCandidate(target_page); |
| 4052 | } |
| 4053 | } |
| 4054 | } |
| 4055 | |
| 4056 | |
| 4057 | void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { |
| 4058 | Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target)); |
| 4059 | if (target_page->IsEvacuationCandidate() && |
| 4060 | !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) { |
| 4061 | if (!SlotsBuffer::AddTo(&slots_buffer_allocator_, |
| 4062 | target_page->slots_buffer_address(), |
| 4063 | SlotsBuffer::CODE_ENTRY_SLOT, |
| 4064 | slot, |
| 4065 | SlotsBuffer::FAIL_ON_OVERFLOW)) { |
| 4066 | EvictEvacuationCandidate(target_page); |
| 4067 | } |
| 4068 | } |
| 4069 | } |
| 4070 | |
| 4071 | |
| 4072 | static inline SlotsBuffer::SlotType DecodeSlotType( |
| 4073 | SlotsBuffer::ObjectSlot slot) { |
| 4074 | return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot)); |
| 4075 | } |
| 4076 | |
| 4077 | |
| 4078 | void SlotsBuffer::UpdateSlots(Heap* heap) { |
| 4079 | PointersUpdatingVisitor v(heap); |
| 4080 | |
| 4081 | for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { |
| 4082 | ObjectSlot slot = slots_[slot_idx]; |
| 4083 | if (!IsTypedSlot(slot)) { |
| 4084 | PointersUpdatingVisitor::UpdateSlot(heap, slot); |
| 4085 | } else { |
| 4086 | ++slot_idx; |
| 4087 | ASSERT(slot_idx < idx_); |
| 4088 | UpdateSlot(&v, |
| 4089 | DecodeSlotType(slot), |
| 4090 | reinterpret_cast<Address>(slots_[slot_idx])); |
| 4091 | } |
| 4092 | } |
| 4093 | } |
| 4094 | |
| 4095 | |
| 4096 | void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) { |
| 4097 | PointersUpdatingVisitor v(heap); |
| 4098 | |
| 4099 | for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) { |
| 4100 | ObjectSlot slot = slots_[slot_idx]; |
| 4101 | if (!IsTypedSlot(slot)) { |
| 4102 | if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) { |
| 4103 | PointersUpdatingVisitor::UpdateSlot(heap, slot); |
| 4104 | } |
| 4105 | } else { |
| 4106 | ++slot_idx; |
| 4107 | ASSERT(slot_idx < idx_); |
| 4108 | Address pc = reinterpret_cast<Address>(slots_[slot_idx]); |
| 4109 | if (!IsOnInvalidatedCodeObject(pc)) { |
| 4110 | UpdateSlot(&v, |
| 4111 | DecodeSlotType(slot), |
| 4112 | reinterpret_cast<Address>(slots_[slot_idx])); |
| 4113 | } |
| 4114 | } |
| 4115 | } |
| 4116 | } |
| 4117 | |
| 4118 | |
| 4119 | SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) { |
| 4120 | return new SlotsBuffer(next_buffer); |
| 4121 | } |
| 4122 | |
| 4123 | |
| 4124 | void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) { |
| 4125 | delete buffer; |
| 4126 | } |
| 4127 | |
| 4128 | |
| 4129 | void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) { |
| 4130 | SlotsBuffer* buffer = *buffer_address; |
| 4131 | while (buffer != NULL) { |
| 4132 | SlotsBuffer* next_buffer = buffer->next(); |
| 4133 | DeallocateBuffer(buffer); |
| 4134 | buffer = next_buffer; |
| 4135 | } |
| 4136 | *buffer_address = NULL; |
Iain Merrick | 7568138 | 2010-08-19 15:07:18 +0100 | [diff] [blame] | 4137 | } |
| 4138 | |
| 4139 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 4140 | } } // namespace v8::internal |