// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "store-buffer.h"
#include "store-buffer-inl.h"
#include "v8-counters.h"

namespace v8 {
namespace internal {

StoreBuffer::StoreBuffer(Heap* heap)
    : heap_(heap),
      start_(NULL),
      limit_(NULL),
      old_start_(NULL),
      old_limit_(NULL),
      old_top_(NULL),
      old_reserved_limit_(NULL),
      old_buffer_is_sorted_(false),
      old_buffer_is_filtered_(false),
      during_gc_(false),
      store_buffer_rebuilding_enabled_(false),
      callback_(NULL),
      may_move_store_buffer_entries_(true),
      virtual_memory_(NULL),
      hash_set_1_(NULL),
      hash_set_2_(NULL),
      hash_sets_are_empty_(true) {
}


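// The store buffer is kept in two parts: a small buffer (start_..limit_)
// that the write barrier appends raw slot addresses to, and a much larger
// old buffer (old_start_..old_reserved_limit_) into which Compact() moves
// deduplicated entries. When even the old buffer runs out of room,
// EnsureSpace() exempts the busiest pages from the store buffer entirely by
// flagging them scan_on_scavenge.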
void StoreBuffer::SetUp() {
  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
  uintptr_t start_as_int =
      reinterpret_cast<uintptr_t>(virtual_memory_->address());
  start_ =
      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
  limit_ = start_ + (kStoreBufferSize / kPointerSize);
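  // Reserving three times the buffer size guarantees that somewhere inside
  // the reservation there is a kStoreBufferSize-byte region aligned to
  // 2 * kStoreBufferSize. With that alignment, reaching limit_ sets
  // kStoreBufferOverflowBit in the top address while every in-buffer
  // position leaves it clear, so overflow can be detected by testing a
  // single bit (the ASSERTs below verify this).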

  old_virtual_memory_ =
      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
  old_top_ = old_start_ =
      reinterpret_cast<Address*>(old_virtual_memory_->address());
  // We don't know the OS's exact allocation granularity, but it is at least
  // one 4K page, so the low 12 bits of the address must be zero.
  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
  ASSERT(initial_length > 0);
  ASSERT(initial_length <= kOldStoreBufferLength);
  old_limit_ = old_start_ + initial_length;
  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;

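  // Only the initial OS page of the old buffer is committed up front;
  // EnsureSpace() commits more of the reservation (doubling each time) as
  // the buffer fills, up to old_reserved_limit_.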
  CHECK(old_virtual_memory_->Commit(
      reinterpret_cast<void*>(old_start_),
      (old_limit_ - old_start_) * kPointerSize,
      false));

  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
  Address* vm_limit = reinterpret_cast<Address*>(
      reinterpret_cast<char*>(virtual_memory_->address()) +
      virtual_memory_->size());
  ASSERT(start_ <= vm_limit);
  ASSERT(limit_ <= vm_limit);
  USE(vm_limit);
  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
         0);

  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
                                kStoreBufferSize,
                                false));  // Not executable.
  heap_->public_set_store_buffer_top(start_);

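  // Two small hash sets, used by Compact() to cheaply filter out duplicate
  // entries as they are moved into the old buffer.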
  hash_set_1_ = new uintptr_t[kHashSetLength];
  hash_set_2_ = new uintptr_t[kHashSetLength];
  hash_sets_are_empty_ = false;

  ClearFilteringHashSets();
}


void StoreBuffer::TearDown() {
  delete virtual_memory_;
  delete old_virtual_memory_;
  delete[] hash_set_1_;
  delete[] hash_set_2_;
  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
  start_ = limit_ = NULL;
  heap_->public_set_store_buffer_top(start_);
}


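// Called when the small buffer runs full (the write barrier finds top at
// limit_); compacting moves the contents into the old buffer and resets the
// top pointer back to start_.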
void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
  isolate->heap()->store_buffer()->Compact();
}


#if V8_TARGET_ARCH_X64
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  // Unfortunately if int is smaller than intptr_t there is no branch-free
  // way to return a number with the same sign as the difference between the
  // pointers.
  if (a == b) return 0;
  if (a < b) return -1;
  ASSERT(a > b);
  return 1;
}
#else
static int CompareAddresses(const void* void_a, const void* void_b) {
  intptr_t a =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_a));
  intptr_t b =
      reinterpret_cast<intptr_t>(*reinterpret_cast<const Address*>(void_b));
  // This branch-free version requires int and intptr_t to have the same
  // width (sizeof(1) is sizeof(int)).
  ASSERT(sizeof(1) == sizeof(a));
  // Shift down to avoid wraparound: after the shift both values fit in
  // 32 - kPointerSizeLog2 bits, so their difference cannot overflow an int.
  return (a >> kPointerSizeLog2) - (b >> kPointerSizeLog2);
}
#endif


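// Assumes the old buffer has been sorted, so that duplicates are adjacent
// (see SortUniq below).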
void StoreBuffer::Uniq() {
  // Remove adjacent duplicates and cells that do not point at new space.
  Address previous = NULL;
  Address* write = old_start_;
  ASSERT(may_move_store_buffer_entries_);
  for (Address* read = old_start_; read < old_top_; read++) {
    Address current = *read;
    if (current != previous) {
      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
        *write++ = current;
      }
    }
    previous = current;
  }
  old_top_ = write;
}


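// Makes room for space_needed more entries in the old buffer, escalating
// through progressively more drastic measures: commit more of the
// reservation, compact and filter the buffer, and finally exempt the most
// popular pages from the store buffer altogether.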
void StoreBuffer::EnsureSpace(intptr_t space_needed) {
  while (old_limit_ - old_top_ < space_needed &&
         old_limit_ < old_reserved_limit_) {
    size_t grow = old_limit_ - old_start_;  // Double size.
    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                      grow * kPointerSize,
                                      false));
    old_limit_ += grow;
  }

  if (old_limit_ - old_top_ >= space_needed) return;

  if (old_buffer_is_filtered_) return;
  ASSERT(may_move_store_buffer_entries_);
  Compact();

  old_buffer_is_filtered_ = true;
  bool page_has_scan_on_scavenge_flag = false;

  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // If filtering out the entries from scan_on_scavenge pages got us down to
  // less than half full, then we are satisfied with that.
  if (old_limit_ - old_top_ > old_top_ - old_start_) return;

  // Sample 1 entry in 97 and filter out the pages where we estimate that more
  // than 1 in 8 pointers are to new space.
  static const int kSampleFinenesses = 5;
  static const struct Samples {
    int prime_sample_step;
    int threshold;
  } samples[kSampleFinenesses] = {
    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
    { 1, 0}
  };
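  // Worked example, assuming 1MB pages and 4-byte pointers (illustrative
  // values): a page holds up to 1MB / 4 = 262144 slots, so the first row
  // samples every 97th entry (about 2702 samples for a full page's worth)
  // and exempts a page once 337 of its samples are seen, i.e. when roughly
  // 1 in 8 of its slots are in the buffer. Later rows sample more densely
  // and trigger at lower fractions; the final {1, 0} row exempts every page
  // that still has an entry.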
  for (int i = 0; i < kSampleFinenesses; i++) {
    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
    // As a last resort the final {1, 0} sample marks all pages as being
    // exempt from the store buffer, after which the buffer must be empty.
    ASSERT(i != kSampleFinenesses - 1 || old_top_ == old_start_);
    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
  }
  UNREACHABLE();
}


// Sample the store buffer to see if some pages are taking up a lot of space
// in the store buffer.
void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    chunk->set_store_buffer_counter(0);
  }
  bool created_new_scan_on_scavenge_pages = false;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
    }
    int old_counter = containing_chunk->store_buffer_counter();
    if (old_counter == threshold) {
      containing_chunk->set_scan_on_scavenge(true);
      created_new_scan_on_scavenge_pages = true;
    }
    containing_chunk->set_store_buffer_counter(old_counter + 1);
    previous_chunk = containing_chunk;
  }
  if (created_new_scan_on_scavenge_pages) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }
  old_buffer_is_filtered_ = true;
}


void StoreBuffer::Filter(int flag) {
  Address* new_top = old_start_;
  MemoryChunk* previous_chunk = NULL;
  for (Address* p = old_start_; p < old_top_; p++) {
    Address addr = *p;
    MemoryChunk* containing_chunk = NULL;
    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
      containing_chunk = previous_chunk;
    } else {
      containing_chunk = MemoryChunk::FromAnyPointerAddress(addr);
      previous_chunk = containing_chunk;
    }
    if (!containing_chunk->IsFlagSet(flag)) {
      *new_top++ = addr;
    }
  }
  old_top_ = new_top;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


void StoreBuffer::SortUniq() {
  Compact();
  if (old_buffer_is_sorted_) return;
  qsort(reinterpret_cast<void*>(old_start_),
        old_top_ - old_start_,
        sizeof(*old_top_),
        &CompareAddresses);
  Uniq();

  old_buffer_is_sorted_ = true;

  // Filtering hash sets are inconsistent with the store buffer after this
  // operation.
  ClearFilteringHashSets();
}


bool StoreBuffer::PrepareForIteration() {
  Compact();
  PointerChunkIterator it(heap_);
  MemoryChunk* chunk;
  bool page_has_scan_on_scavenge_flag = false;
  while ((chunk = it.next()) != NULL) {
    if (chunk->scan_on_scavenge()) page_has_scan_on_scavenge_flag = true;
  }

  if (page_has_scan_on_scavenge_flag) {
    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  // Filtering hash sets are inconsistent with the store buffer after
  // iteration.
  ClearFilteringHashSets();

  return page_has_scan_on_scavenge_flag;
}


#ifdef DEBUG
void StoreBuffer::Clean() {
  ClearFilteringHashSets();
  Uniq();  // Also removes things that no longer point to new space.
  CheckForFullBuffer();
}


static Address* in_store_buffer_1_element_cache = NULL;


bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
  if (!FLAG_enable_slow_asserts) return true;
  if (in_store_buffer_1_element_cache != NULL &&
      *in_store_buffer_1_element_cache == cell_address) {
    return true;
  }
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
  for (Address* current = top - 1; current >= start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
    if (*current == cell_address) {
      in_store_buffer_1_element_cache = current;
      return true;
    }
  }
  return false;
}
#endif


void StoreBuffer::ClearFilteringHashSets() {
  if (!hash_sets_are_empty_) {
    memset(reinterpret_cast<void*>(hash_set_1_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    memset(reinterpret_cast<void*>(hash_set_2_),
           0,
           sizeof(uintptr_t) * kHashSetLength);
    hash_sets_are_empty_ = true;
  }
}


void StoreBuffer::GCPrologue() {
  ClearFilteringHashSets();
  during_gc_ = true;
}


#ifdef DEBUG
static void DummyScavengePointer(HeapObject** p, HeapObject* o) {
  // Do nothing.
}


void StoreBuffer::VerifyPointers(PagedSpace* space,
                                 RegionCallback region_callback) {
  PageIterator it(space);

  while (it.has_next()) {
    Page* page = it.next();
    FindPointersToNewSpaceOnPage(
        reinterpret_cast<PagedSpace*>(page->owner()),
        page,
        region_callback,
        &DummyScavengePointer);
  }
}


void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
    if (object->IsFixedArray()) {
      Address slot_address = object->address();
      Address end = object->address() + object->Size();

      while (slot_address < end) {
        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
        // When we are not in GC the Heap::InNewSpace() predicate
        // checks that pointers which satisfy the predicate point into
        // the active semispace.
        heap_->InNewSpace(*slot);
        slot_address += kPointerSize;
      }
    }
  }
}
#endif


void StoreBuffer::Verify() {
#ifdef DEBUG
  VerifyPointers(heap_->old_pointer_space(),
                 &StoreBuffer::FindPointersToNewSpaceInRegion);
  VerifyPointers(heap_->map_space(),
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion);
  VerifyPointers(heap_->lo_space());
#endif
}


void StoreBuffer::GCEpilogue() {
  during_gc_ = false;
  if (FLAG_verify_heap) {
    Verify();
  }
}


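// Scans the region [start, end) for slots holding new-space pointers,
// invokes the callback on each such slot, and re-records the slot if it
// still points to new space afterwards (the callback may have promoted the
// target out of new space).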
void StoreBuffer::FindPointersToNewSpaceInRegion(
    Address start, Address end, ObjectSlotCallback slot_callback) {
  for (Address slot_address = start;
       slot_address < end;
       slot_address += kPointerSize) {
    Object** slot = reinterpret_cast<Object**>(slot_address);
    if (heap_->InNewSpace(*slot)) {
      HeapObject* object = reinterpret_cast<HeapObject*>(*slot);
      ASSERT(object->IsHeapObject());
      slot_callback(reinterpret_cast<HeapObject**>(slot), object);
      if (heap_->InNewSpace(*slot)) {
        EnterDirectlyIntoStoreBuffer(slot_address);
      }
    }
  }
}


// Compute the start address of the first map following the given addr.
static inline Address MapStartAlign(Address addr) {
  Address page = Page::FromAddress(addr)->area_start();
  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
}


// Compute the end address of the last map preceding the given addr.
static inline Address MapEndAlign(Address addr) {
  Address page = Page::FromAllocationTop(addr)->area_start();
  return page + ((addr - page) / Map::kSize * Map::kSize);
}


void StoreBuffer::FindPointersToNewSpaceInMaps(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  ASSERT(MapStartAlign(start) == start);
  ASSERT(MapEndAlign(end) == end);

  Address map_address = start;
  while (map_address < end) {
    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
    ASSERT(Memory::Object_at(map_address)->IsMap());

    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;

    FindPointersToNewSpaceInRegion(pointer_fields_start,
                                   pointer_fields_end,
                                   slot_callback);
    map_address += Map::kSize;
  }
}


void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
    Address start,
    Address end,
    ObjectSlotCallback slot_callback) {
  Address map_aligned_start = MapStartAlign(start);
  Address map_aligned_end = MapEndAlign(end);

  ASSERT(map_aligned_start == start);
  ASSERT(map_aligned_end == end);

  FindPointersToNewSpaceInMaps(map_aligned_start,
                               map_aligned_end,
                               slot_callback);
}


// This function iterates over all the pointers in a paged space in the heap,
// looking for pointers into new space.  Within the pages there may be dead
// objects that have not been overwritten by free spaces or fillers because of
// lazy sweeping.  These dead objects may not contain pointers to new space.
// The garbage areas that have been swept properly (these will normally be the
// large ones) will be marked with free space and filler map words.  In
// addition any area that has never been used at all for object allocation must
// be marked with a free space or filler.  Because the free space and filler
// maps do not move we can always recognize these even after a compaction.
// Normal objects like FixedArrays and JSObjects should not contain references
// to these maps.  The special garbage section (see comment in spaces.h) is
// skipped since it can contain absolutely anything.  Any objects that are
// allocated during iteration may or may not be visited by the iteration, but
// they will not be partially visited.
void StoreBuffer::FindPointersToNewSpaceOnPage(
    PagedSpace* space,
    Page* page,
    RegionCallback region_callback,
    ObjectSlotCallback slot_callback) {
  Address visitable_start = page->area_start();
  Address end_of_page = page->area_end();

  Address visitable_end = visitable_start;

  Object* free_space_map = heap_->free_space_map();
  Object* two_pointer_filler_map = heap_->two_pointer_filler_map();

  while (visitable_end < end_of_page) {
    Object* o = *reinterpret_cast<Object**>(visitable_end);
    // Skip fillers but not things that look like fillers in the special
    // garbage section which can contain anything.
    if (o == free_space_map ||
        o == two_pointer_filler_map ||
        (visitable_end == space->top() && visitable_end != space->limit())) {
      if (visitable_start != visitable_end) {
        // After calling this the special garbage section may have moved.
        (this->*region_callback)(visitable_start,
                                 visitable_end,
                                 slot_callback);
        if (visitable_end >= space->top() && visitable_end < space->limit()) {
          visitable_end = space->limit();
          visitable_start = visitable_end;
          continue;
        }
      }
      if (visitable_end == space->top() && visitable_end != space->limit()) {
        visitable_start = visitable_end = space->limit();
      } else {
        // At this point we are either at the start of a filler or we are at
        // the point where the space->top() used to be before the
        // visit_pointer_region call above.  Either way we can skip the
        // object at the current spot:  We don't promise to visit objects
        // allocated during heap traversal, and if space->top() moved then it
        // must be because an object was allocated at this point.
        visitable_start =
            visitable_end + HeapObject::FromAddress(visitable_end)->Size();
        visitable_end = visitable_start;
      }
    } else {
      ASSERT(o != free_space_map);
      ASSERT(o != two_pointer_filler_map);
      ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
      visitable_end += kPointerSize;
    }
  }
  ASSERT(visitable_end == end_of_page);
  if (visitable_start != visitable_end) {
    (this->*region_callback)(visitable_start,
                             visitable_end,
                             slot_callback);
  }
}


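// Drains the old buffer: old_top_ is reset first and each recorded slot is
// replayed through the callback; slots that still point to new space are
// re-entered directly into the old buffer. Entries may not move while the
// DontMoveStoreBufferEntriesScope is active.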
void StoreBuffer::IteratePointersInStoreBuffer(
    ObjectSlotCallback slot_callback) {
  Address* limit = old_top_;
  old_top_ = old_start_;
  {
    DontMoveStoreBufferEntriesScope scope(this);
    for (Address* current = old_start_; current < limit; current++) {
#ifdef DEBUG
      Address* saved_top = old_top_;
#endif
      Object** slot = reinterpret_cast<Object**>(*current);
      Object* object = *slot;
      if (heap_->InFromSpace(object)) {
        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
        if (heap_->InNewSpace(*slot)) {
          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
        }
      }
      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
    }
  }
}


void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
  // We do not sort or remove duplicate entries from the store buffer here
  // because we expect the callback to rebuild the store buffer, removing all
  // duplicates and pointers to old space in the process.
  bool some_pages_to_scan = PrepareForIteration();

  // TODO(gc): we want to skip slots on evacuation candidates
  // but we can't simply figure that out from slot address
  // because slot can belong to a large object.
  IteratePointersInStoreBuffer(slot_callback);

  // We are done scanning all the pointers that were in the store buffer, but
  // there may be some pages marked scan_on_scavenge that have pointers to new
  // space that are not in the store buffer.  We must scan them now.  As we
  // scan, the surviving pointers to new space will be added to the store
  // buffer.  If there are still a lot of pointers to new space then we will
  // keep the scan_on_scavenge flag on the page and discard the pointers that
  // were added to the store buffer.  If there are not many pointers to new
  // space left on the page we will keep the pointers in the store buffer and
  // remove the flag from the page.
  if (some_pages_to_scan) {
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
    }
    PointerChunkIterator it(heap_);
    MemoryChunk* chunk;
    while ((chunk = it.next()) != NULL) {
      if (chunk->scan_on_scavenge()) {
        chunk->set_scan_on_scavenge(false);
        if (callback_ != NULL) {
          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
        }
        if (chunk->owner() == heap_->lo_space()) {
          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
          HeapObject* array = large_page->GetObject();
          ASSERT(array->IsFixedArray());
          Address start = array->address();
          Address end = start + array->Size();
          FindPointersToNewSpaceInRegion(start, end, slot_callback);
        } else {
          Page* page = reinterpret_cast<Page*>(chunk);
          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
          FindPointersToNewSpaceOnPage(
              owner,
              page,
              (owner == heap_->map_space() ?
                 &StoreBuffer::FindPointersToNewSpaceInMapsRegion :
                 &StoreBuffer::FindPointersToNewSpaceInRegion),
              slot_callback);
        }
      }
    }
    if (callback_ != NULL) {
      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
    }
  }
}


void StoreBuffer::Compact() {
  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());

  if (top == start_) return;

  // There's no check of the limit in the loop below so we check here for
  // the worst case (compaction doesn't eliminate any pointers).
  ASSERT(top <= limit_);
  heap_->public_set_store_buffer_top(start_);
  EnsureSpace(top - start_);
  ASSERT(may_move_store_buffer_entries_);
  // Goes through the addresses in the store buffer attempting to remove
  // duplicates.  In the interest of speed this is a lossy operation.  Some
  // duplicates will remain.  We have two hash sets with different hash
  // functions to reduce the number of unnecessary clashes.
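  // For example (illustrative numbers only): if kHashSetLengthLog2 were 12,
  // an entry whose untagged form is 0x2468a would give
  // hash1 = (0x2468a ^ (0x2468a >> 12)) & 0xfff = 0x6ae.  An entry that
  // misses in hash_set_1_ falls through to hash_set_2_, and if both slots
  // are already taken it evicts the hash_set_1_ slot (clearing the
  // hash_set_2_ slot), so some duplicates can go undetected.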
  hash_sets_are_empty_ = false;  // Hash sets are in use.
  for (Address* current = start_; current < top; current++) {
    ASSERT(!heap_->cell_space()->Contains(*current));
    ASSERT(!heap_->code_space()->Contains(*current));
    ASSERT(!heap_->old_data_space()->Contains(*current));
    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
    // Shift out the last bits including any tags.
    int_addr >>= kPointerSizeLog2;
    int hash1 =
        ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
    if (hash_set_1_[hash1] == int_addr) continue;
    uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2));
    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
    hash2 &= (kHashSetLength - 1);
    if (hash_set_2_[hash2] == int_addr) continue;
    if (hash_set_1_[hash1] == 0) {
      hash_set_1_[hash1] = int_addr;
    } else if (hash_set_2_[hash2] == 0) {
      hash_set_2_[hash2] = int_addr;
    } else {
      // Rather than slowing down we just throw away some entries.  This will
      // cause some duplicates to remain undetected.
      hash_set_1_[hash1] = int_addr;
      hash_set_2_[hash2] = 0;
    }
    old_buffer_is_sorted_ = false;
    old_buffer_is_filtered_ = false;
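    // Slot addresses are assumed pointer-aligned (any low tag bits were
    // deliberately discarded above), so shifting back up reconstructs the
    // slot address exactly.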
    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
    ASSERT(old_top_ <= old_limit_);
  }
  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
  CheckForFullBuffer();
}


void StoreBuffer::CheckForFullBuffer() {
  EnsureSpace(kStoreBufferSize * 2);
}

} }  // namespace v8::internal