// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen.h"
#include "src/heap/mark-compact.h"
#include "src/macro-assembler.h"
#include "src/msan.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
}


HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
                                       HeapObjectCallback size_func) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize the current
  // address and end as NULL, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
}


HeapObjectIterator::HeapObjectIterator(Page* page,
                                       HeapObjectCallback size_func) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_pointer_space() ||
         owner == page->heap()->old_data_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->cell_space() ||
         owner == page->heap()->property_cell_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly, size_func);
  DCHECK(page->WasSwept() || page->SweepingCompleted());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode,
                                    HeapObjectCallback size_f) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
  size_func_ = size_f;
}


// We have hit the end of the current page's objects and should advance to the
// next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0),
      emergency_block_() {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
  code_range_ = new base::VirtualMemory(requested);
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  ReserveEmergencyBlock();
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


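// Advances current_allocation_block_index_ to the next block in
// allocation_list_ that can hold |requested| bytes. If no remaining block is
// large enough, the free list and the allocation list are merged, coalesced
// and re-sorted before retrying; returns false only when the code range is
// full or too fragmented.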
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  DCHECK(commit_size <= requested_size);
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  free_list_.Free();
  allocation_list_.Free();
}


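// Carves a block of at least |requested_size| bytes off the front of the
// current allocation block. The caller either commits the block or hands it
// back via ReleaseBlock().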
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }


void CodeRange::ReserveEmergencyBlock() {
  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
  if (emergency_block_.size == 0) {
    ReserveBlock(requested_size, &emergency_block_);
  } else {
    DCHECK(emergency_block_.size >= requested_size);
  }
}


void CodeRange::ReleaseEmergencyBlock() {
  if (emergency_block_.size != 0) {
    ReleaseBlock(&emergency_block_);
    emergency_block_.size = 0;
  }
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_ == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(reservation->IsReserved());
  size_t size = reservation->size();
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid());
  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  DCHECK(size_ >= size);
  size_ -= size;

  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (executable == EXECUTABLE) {
    DCHECK(size_executable_ >= size);
    size_executable_ -= size;
  }
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}


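// Reserves (but does not commit) |size| bytes of address space and returns the
// reservation base rounded up to |alignment|; ownership of the reservation is
// transferred to |controller|.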
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_ += reservation.size();
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space);
  chunk->set_next_chunk(NULL);
  chunk->set_prev_chunk(NULL);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<int>(area_start - base);
  chunk->set_parallel_sweeping(SWEEPING_DONE);
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (owner == heap->old_data_space()) {
    chunk->SetFlag(CONTAINS_ONLY_DATA);
  }

  return chunk;
}


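// CommitArea() is used both to grow and to shrink the committed part of a
// chunk: it commits or uncommits whole OS pages between the header and the
// trailing guard page (if any), then moves area_end_ to match the requested
// size.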
// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if (size_executable_ + chunk_size > capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_ += chunk_size;
      // Update executable memory size.
      size_executable_ += chunk_size;
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_ += reservation.size();
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(
      heap, base, chunk_size, area_start, area_end, executable, owner);
  result->set_reserved_memory(&reservation);
  return result;
}


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);

  if (chunk == NULL) return NULL;

  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  delete chunk->slots_buffer();
  delete chunk->skip_list();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}


bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


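// Overwrites the block, one pointer-sized slot at a time, with kZapValue so
// that stale references into freed or uncommitted memory are easy to spot in
// a debugger.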
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  float pct = static_cast<float>(capacity_ - size_) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size_, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
    return false;
  }

  // Create guard page after the header.
  if (!vm->Guard(start + CodePageGuardStartOffset())) {
    return false;
  }

  // Commit page body (executable).
  if (!vm->Commit(start + CodePageAreaStartOffset(),
                  commit_size - CodePageGuardStartOffset(), true)) {
    return false;
  }

  // Create guard page before the end.
  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
    return false;
  }

  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                        commit_size -
                                        CodePageGuardStartOffset());
  return true;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

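// When the mutator adds live bytes on a page that has not been swept yet,
// those bytes are no longer free, so the owning space's unswept-free-bytes
// counter is decreased by the same amount to keep the accounting consistent.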
void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
  }
  chunk->IncrementLiveBytes(by);
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1
                                       << AllocationSpace::OLD_POINTER_SPACE) ==
              ObjectSpace::kObjectSpaceOldPointerSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
              ObjectSpace::kObjectSpaceOldDataSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
              ObjectSpace::kObjectSpaceCellSpace);
STATIC_ASSERT(
    static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
    ObjectSpace::kObjectSpacePropertyCellSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);


PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable),
      free_list_(this),
      unswept_free_bytes_(0),
      end_of_unswept_pages_(NULL),
      emergency_memory_(NULL) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  max_capacity_ =
      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
  accounting_stats_.Clear();

  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p, NULL);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand() {
  DCHECK(max_capacity_ % AreaSize() == 0);

  if (Capacity() == max_capacity_) return false;

  DCHECK(Capacity() < max_capacity_);

  // Are we going to exceed capacity for this space?
  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;

  return true;
}


bool PagedSpace::Expand() {
  if (!CanExpand()) return false;

  intptr_t size = AreaSize();

  if (anchor_.next_page() == &anchor_) {
    size = SizeOfFirstPage();
  }

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  DCHECK(Capacity() <= max_capacity_);

  p->InsertAfter(anchor_.prev_page());

  return true;
}


intptr_t PagedSpace::SizeOfFirstPage() {
  // If using an ool constant pool then transfer the constant pool allowance
  // from the code space to the old pointer space.
  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
  int size = 0;
  switch (identity()) {
    case OLD_POINTER_SPACE:
      size = (128 + constant_pool_delta) * kPointerSize * KB;
      break;
    case OLD_DATA_SPACE:
      size = 192 * KB;
      break;
    case MAP_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case CELL_SPACE:
      size = 16 * kPointerSize * KB;
      break;
    case PROPERTY_CELL_SPACE:
      size = 8 * kPointerSize * KB;
      break;
    case CODE_SPACE: {
      CodeRange* code_range = heap()->isolate()->code_range();
      if (code_range != NULL && code_range->valid()) {
        // When code range exists, code pages are allocated in a special way
        // (from the reserved code range). That part of the code is not yet
        // upgraded to handle small pages.
        size = AreaSize();
      } else {
        size = RoundUp((480 - constant_pool_delta) * KB *
                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
                       kPointerSize);
      }
      break;
    }
    default:
      UNREACHABLE();
  }
  return Min(size, AreaSize());
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
  sizes->huge_size_ = page->available_in_huge_free_list();
  sizes->small_size_ = page->available_in_small_free_list();
  sizes->medium_size_ = page->available_in_medium_free_list();
  sizes->large_size_ = page->available_in_large_free_list();
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}


void PagedSpace::ReleasePage(Page* page) {
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    DCHECK_EQ(AreaSize(), static_cast<int>(size));
  } else {
    DecreaseUnsweptFreeBytes(page);
  }

  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
    heap()->decrement_scan_on_scavenge_pages();
    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.set_top(NULL);
    allocation_info_.set_limit(NULL);
  }

  // If the page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
    heap()->isolate()->memory_allocator()->Free(page);
  } else {
    heap()->QueueMemoryChunkForFree(page);
  }

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


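// Sets aside a spare chunk that can later be turned into a real page with
// UseEmergencyMemory() or returned with FreeEmergencyMemory(). For the code
// space, the code range's emergency block is released first so that the
// allocation below can be served from it.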
void PagedSpace::CreateEmergencyMemory() {
  if (identity() == CODE_SPACE) {
    // Make the emergency block available to the allocator.
    CodeRange* code_range = heap()->isolate()->code_range();
    if (code_range != NULL && code_range->valid()) {
      code_range->ReleaseEmergencyBlock();
    }
    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
  }
  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
      AreaSize(), AreaSize(), executable(), this);
}


void PagedSpace::FreeEmergencyMemory() {
  Page* page = static_cast<Page*>(emergency_memory_);
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());
  DCHECK(!free_list_.ContainsPageFreeListItems(page));
  heap()->isolate()->memory_allocator()->Free(page);
  emergency_memory_ = NULL;
}


void PagedSpace::UseEmergencyMemory() {
  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
  page->InsertAfter(anchor_.prev_page());
  emergency_memory_ = NULL;
}


#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSwept());
    HeapObjectIterator it(page, NULL);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  int target_semispace_capacity = heap()->TargetSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  DCHECK(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                  target_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity, target_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.set_top(NULL);
  allocation_info_.set_limit(NULL);

  to_space_.TearDown();
  from_space_.TearDown();

  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));

  DCHECK(reservation_.IsReserved());
  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
                                                    NOT_EXECUTABLE);
  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


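// Tries to grow the new space by exactly one page: to-space is grown, and
// from-space either grows with it (when committed) or merely has its total
// capacity bumped so that both semispaces keep the same size.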
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1358 | bool NewSpace::GrowOnePage() { |
| 1359 | if (TotalCapacity() == MaximumCapacity()) return false; |
| 1360 | int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize; |
| 1361 | if (to_space_.GrowTo(new_capacity)) { |
| 1362 | // Only grow from space if we managed to grow to-space and the from space |
| 1363 | // is actually committed. |
| 1364 | if (from_space_.is_committed()) { |
| 1365 | if (!from_space_.GrowTo(new_capacity)) { |
| 1366 | // If we managed to grow to-space but couldn't grow from-space, |
| 1367 | // attempt to shrink to-space. |
| 1368 | if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) { |
| 1369 | // We are in an inconsistent state because we could not |
| 1370 | // commit/uncommit memory from new space. |
| 1371 | CHECK(false); |
| 1372 | } |
| 1373 | return false; |
| 1374 | } |
| 1375 | } else { |
| 1376 | if (!from_space_.SetTotalCapacity(new_capacity)) { |
| 1377 | // Can't really happen, but better safe than sorry. |
| 1378 | CHECK(false); |
| 1379 | } |
| 1380 | } |
| 1381 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1382 | return true; |
| 1383 | } |
| 1384 | return false; |
| 1385 | } |
| 1386 | |
| 1387 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1388 | void NewSpace::Shrink() { |
| 1389 | int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt()); |
| 1390 | int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); |
| 1391 | if (rounded_new_capacity < TotalCapacity() && |
| 1392 | to_space_.ShrinkTo(rounded_new_capacity)) { |
| 1393 | // Only shrink from-space if we managed to shrink to-space. |
| 1394 | from_space_.Reset(); |
| 1395 | if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1396 | // If we managed to shrink to-space but couldn't shrink from |
| 1397 | // space, attempt to grow to-space again. |
| 1398 | if (!to_space_.GrowTo(from_space_.TotalCapacity())) { |
| 1399 | // We are in an inconsistent state because we could not |
| 1400 | // commit/uncommit memory from new space. |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1401 | CHECK(false); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1402 | } |
| 1403 | } |
| 1404 | } |
| 1405 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1406 | } |
| 1407 | |
| 1408 | |
| 1409 | void NewSpace::UpdateAllocationInfo() { |
| 1410 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1411 | allocation_info_.set_top(to_space_.page_low()); |
| 1412 | allocation_info_.set_limit(to_space_.page_high()); |
| 1413 | UpdateInlineAllocationLimit(0); |
| 1414 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1415 | } |
| 1416 | |
| 1417 | |
| 1418 | void NewSpace::ResetAllocationInfo() { |
| 1419 | to_space_.Reset(); |
| 1420 | UpdateAllocationInfo(); |
| 1421 | pages_used_ = 0; |
| 1422 | // Clear all mark-bits in the to-space. |
| 1423 | NewSpacePageIterator it(&to_space_); |
| 1424 | while (it.has_next()) { |
| 1425 | Bitmap::Clear(it.next()); |
| 1426 | } |
| 1427 | } |
| 1428 | |
| 1429 | |
| 1430 | void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { |
| 1431 | if (heap()->inline_allocation_disabled()) { |
| 1432 | // Lowest limit when linear allocation was disabled. |
| 1433 | Address high = to_space_.page_high(); |
| 1434 | Address new_top = allocation_info_.top() + size_in_bytes; |
| 1435 | allocation_info_.set_limit(Min(new_top, high)); |
| 1436 | } else if (inline_allocation_limit_step() == 0) { |
| 1437 | // Normal limit is the end of the current page. |
| 1438 | allocation_info_.set_limit(to_space_.page_high()); |
| 1439 | } else { |
| 1440 | // Lower limit during incremental marking. |
| 1441 | Address high = to_space_.page_high(); |
| 1442 | Address new_top = allocation_info_.top() + size_in_bytes; |
| 1443 | Address new_limit = new_top + inline_allocation_limit_step_; |
| 1444 | allocation_info_.set_limit(Min(new_limit, high)); |
| 1445 | } |
| 1446 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1447 | } |
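| | // Worked example, assuming a 32-byte request with top at offset 0x100 of |
| | // the current page: |
| | //  - inline allocation disabled: limit = Min(0x120, page_high()), so only |
| | //    this one request fits before the runtime is re-entered; |
| | //  - no limit step configured: limit = page_high(), i.e. the whole page; |
| | //  - incremental marking with a 1 KB step: limit = Min(0x520, page_high()), |
| | //    forcing the slow allocation path (and a marking step) roughly once per |
| | //    kilobyte of new-space allocation. |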
| 1448 | |
| 1449 | |
| 1450 | bool NewSpace::AddFreshPage() { |
| 1451 | Address top = allocation_info_.top(); |
| 1452 | if (NewSpacePage::IsAtStart(top)) { |
| 1453 | // The current page is already empty. Don't try to make another. |
| 1454 | |
| 1455 | // We should only get here if someone asks to allocate more |
| 1456 | // than what can be stored in a single page. |
| 1457 | // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1458 | // from happening (all such allocations should go directly to LOSpace). |
| 1459 | return false; |
| 1460 | } |
| 1461 | if (!to_space_.AdvancePage()) { |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1462 | // Check if we reached the target capacity yet. If not, try to commit a page |
| 1463 | // and continue. |
| 1464 | if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) && |
| 1465 | GrowOnePage()) { |
| 1466 | if (!to_space_.AdvancePage()) { |
| 1467 |         // We just committed a page, so we must be able to advance to it. |
| 1469 | CHECK(false); |
| 1470 | } |
| 1471 | } else { |
| 1472 | // Failed to get a new page in to-space. |
| 1473 | return false; |
| 1474 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1475 | } |
| 1476 | |
| 1477 | // Clear remainder of current page. |
| 1478 | Address limit = NewSpacePage::FromLimit(top)->area_end(); |
| 1479 | if (heap()->gc_state() == Heap::SCAVENGE) { |
| 1480 | heap()->promotion_queue()->SetNewLimit(limit); |
| 1481 | } |
| 1482 | |
| 1483 | int remaining_in_page = static_cast<int>(limit - top); |
| 1484 | heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1485 | pages_used_++; |
| 1486 | UpdateAllocationInfo(); |
| 1487 | |
| 1488 | return true; |
| 1489 | } |
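| | // Note that the filler written over the unused tail keeps the abandoned |
| | // part of the old page iterable while allocation continues on the fresh |
| | // page. |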
| 1490 | |
| 1491 | |
| 1492 | AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1493 | Address old_top = allocation_info_.top(); |
| 1494 | Address high = to_space_.page_high(); |
| 1495 | if (allocation_info_.limit() < high) { |
| 1496 | // Either the limit has been lowered because linear allocation was disabled |
| 1497 | // or because incremental marking wants to get a chance to do a step. Set |
| 1498 | // the new limit accordingly. |
| 1499 | Address new_top = old_top + size_in_bytes; |
| 1500 | int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
| 1501 | heap()->incremental_marking()->Step(bytes_allocated, |
| 1502 | IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1503 | UpdateInlineAllocationLimit(size_in_bytes); |
| 1504 | top_on_previous_step_ = new_top; |
| 1505 | return AllocateRaw(size_in_bytes); |
| 1506 | } else if (AddFreshPage()) { |
| 1507 | // Switched to new page. Try allocating again. |
| 1508 | int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); |
| 1509 | heap()->incremental_marking()->Step(bytes_allocated, |
| 1510 | IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1511 | top_on_previous_step_ = to_space_.page_low(); |
| 1512 | return AllocateRaw(size_in_bytes); |
| 1513 | } else { |
| 1514 | return AllocationResult::Retry(); |
| 1515 | } |
| 1516 | } |
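| | // The inlined AllocateRaw() fast path simply bumps top while top + size |
| | // stays within limit; control only reaches this slow path when that check |
| | // fails, either to take an incremental-marking step and retry on the same |
| | // page, or to move on to a fresh page via AddFreshPage(). |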
| 1517 | |
| 1518 | |
| 1519 | #ifdef VERIFY_HEAP |
| 1520 | // We do not use the SemiSpaceIterator because verification doesn't assume |
| 1521 | // that it works (it depends on the invariants we are checking). |
| 1522 | void NewSpace::Verify() { |
| 1523 | // The allocation pointer should be in the space or at the very end. |
| 1524 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1525 | |
| 1526 | // There should be objects packed in from the low address up to the |
| 1527 | // allocation pointer. |
| 1528 | Address current = to_space_.first_page()->area_start(); |
| 1529 | CHECK_EQ(current, to_space_.space_start()); |
| 1530 | |
| 1531 | while (current != top()) { |
| 1532 | if (!NewSpacePage::IsAtEnd(current)) { |
| 1533 | // The allocation pointer should not be in the middle of an object. |
| 1534 | CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || |
| 1535 | current < top()); |
| 1536 | |
| 1537 | HeapObject* object = HeapObject::FromAddress(current); |
| 1538 | |
| 1539 | // The first word should be a map, and we expect all map pointers to |
| 1540 | // be in map space. |
| 1541 | Map* map = object->map(); |
| 1542 | CHECK(map->IsMap()); |
| 1543 | CHECK(heap()->map_space()->Contains(map)); |
| 1544 | |
| 1545 | // The object should not be code or a map. |
| 1546 | CHECK(!object->IsMap()); |
| 1547 | CHECK(!object->IsCode()); |
| 1548 | |
| 1549 | // The object itself should look OK. |
| 1550 | object->ObjectVerify(); |
| 1551 | |
| 1552 | // All the interior pointers should be contained in the heap. |
| 1553 | VerifyPointersVisitor visitor; |
| 1554 | int size = object->Size(); |
| 1555 | object->IterateBody(map->instance_type(), size, &visitor); |
| 1556 | |
| 1557 | current += size; |
| 1558 | } else { |
| 1559 | // At end of page, switch to next page. |
| 1560 | NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); |
| 1561 | // Next page should be valid. |
| 1562 | CHECK(!page->is_anchor()); |
| 1563 | current = page->area_start(); |
| 1564 | } |
| 1565 | } |
| 1566 | |
| 1567 | // Check semi-spaces. |
| 1568 | CHECK_EQ(from_space_.id(), kFromSpace); |
| 1569 | CHECK_EQ(to_space_.id(), kToSpace); |
| 1570 | from_space_.Verify(); |
| 1571 | to_space_.Verify(); |
| 1572 | } |
| 1573 | #endif |
| 1574 | |
| 1575 | // ----------------------------------------------------------------------------- |
| 1576 | // SemiSpace implementation |
| 1577 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1578 | void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1579 | int maximum_capacity) { |
| 1580 |   // Creates a space in the young generation. The constructor does not |
| 1581 |   // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1582 |   // reserved memory large enough for 'maximum_capacity' when set up; pages |
| 1583 |   // within that chunk are committed and uncommitted as the space grows and |
| 1584 |   // shrinks. In the mark-compact collector, the memory region of the from |
| 1585 |   // space is used as the marking stack, so it requires contiguous addresses. |
| 1586 | DCHECK(maximum_capacity >= Page::kPageSize); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1587 | DCHECK(initial_capacity <= target_capacity); |
| 1588 | DCHECK(target_capacity <= maximum_capacity); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1589 | initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1590 | total_capacity_ = initial_capacity; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1591 | target_capacity_ = RoundDown(target_capacity, Page::kPageSize); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1592 | maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
| 1593 | maximum_committed_ = 0; |
| 1594 | committed_ = false; |
| 1595 | start_ = start; |
| 1596 | address_mask_ = ~(maximum_capacity - 1); |
| 1597 | object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1598 | object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
| 1599 | age_mark_ = start_; |
| 1600 | } |
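| | // As an illustration of the masks: with an 8 MB maximum capacity, |
| | // address_mask_ is ~0x7FFFFF, so an address is treated as belonging to |
| | // this semispace exactly when masking it yields start_, and |
| | // object_expected_ additionally requires the heap-object tag bit to be set. |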
| 1601 | |
| 1602 | |
| 1603 | void SemiSpace::TearDown() { |
| 1604 | start_ = NULL; |
| 1605 | total_capacity_ = 0; |
| 1606 | } |
| 1607 | |
| 1608 | |
| 1609 | bool SemiSpace::Commit() { |
| 1610 | DCHECK(!is_committed()); |
| 1611 | int pages = total_capacity_ / Page::kPageSize; |
| 1612 | if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1613 | start_, total_capacity_, executable())) { |
| 1614 | return false; |
| 1615 | } |
| 1616 | |
| 1617 | NewSpacePage* current = anchor(); |
| 1618 | for (int i = 0; i < pages; i++) { |
| 1619 | NewSpacePage* new_page = |
| 1620 | NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1621 | new_page->InsertAfter(current); |
| 1622 | current = new_page; |
| 1623 | } |
| 1624 | |
| 1625 | SetCapacity(total_capacity_); |
| 1626 | committed_ = true; |
| 1627 | Reset(); |
| 1628 | return true; |
| 1629 | } |
| 1630 | |
| 1631 | |
| 1632 | bool SemiSpace::Uncommit() { |
| 1633 | DCHECK(is_committed()); |
| 1634 | Address start = start_ + maximum_total_capacity_ - total_capacity_; |
| 1635 | if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, |
| 1636 | total_capacity_)) { |
| 1637 | return false; |
| 1638 | } |
| 1639 | anchor()->set_next_page(anchor()); |
| 1640 | anchor()->set_prev_page(anchor()); |
| 1641 | |
| 1642 | committed_ = false; |
| 1643 | return true; |
| 1644 | } |
| 1645 | |
| 1646 | |
| 1647 | size_t SemiSpace::CommittedPhysicalMemory() { |
| 1648 | if (!is_committed()) return 0; |
| 1649 | size_t size = 0; |
| 1650 | NewSpacePageIterator it(this); |
| 1651 | while (it.has_next()) { |
| 1652 | size += it.next()->CommittedPhysicalMemory(); |
| 1653 | } |
| 1654 | return size; |
| 1655 | } |
| 1656 | |
| 1657 | |
| 1658 | bool SemiSpace::GrowTo(int new_capacity) { |
| 1659 | if (!is_committed()) { |
| 1660 | if (!Commit()) return false; |
| 1661 | } |
| 1662 | DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1663 | DCHECK(new_capacity <= maximum_total_capacity_); |
| 1664 | DCHECK(new_capacity > total_capacity_); |
| 1665 | int pages_before = total_capacity_ / Page::kPageSize; |
| 1666 | int pages_after = new_capacity / Page::kPageSize; |
| 1667 | |
| 1668 | size_t delta = new_capacity - total_capacity_; |
| 1669 | |
| 1670 | DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1671 | if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1672 | start_ + total_capacity_, delta, executable())) { |
| 1673 | return false; |
| 1674 | } |
| 1675 | SetCapacity(new_capacity); |
| 1676 | NewSpacePage* last_page = anchor()->prev_page(); |
| 1677 | DCHECK(last_page != anchor()); |
| 1678 | for (int i = pages_before; i < pages_after; i++) { |
| 1679 | Address page_address = start_ + i * Page::kPageSize; |
| 1680 | NewSpacePage* new_page = |
| 1681 | NewSpacePage::Initialize(heap(), page_address, this); |
| 1682 | new_page->InsertAfter(last_page); |
| 1683 | Bitmap::Clear(new_page); |
| 1684 |     // Duplicate the flags that were set on the old page. |
| 1685 | new_page->SetFlags(last_page->GetFlags(), |
| 1686 | NewSpacePage::kCopyOnFlipFlagsMask); |
| 1687 | last_page = new_page; |
| 1688 | } |
| 1689 | return true; |
| 1690 | } |
| 1691 | |
| 1692 | |
| 1693 | bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1694 | DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1695 | DCHECK(new_capacity >= initial_total_capacity_); |
| 1696 | DCHECK(new_capacity < total_capacity_); |
| 1697 | if (is_committed()) { |
| 1698 | size_t delta = total_capacity_ - new_capacity; |
| 1699 | DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1700 | |
| 1701 | MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1702 | if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { |
| 1703 | return false; |
| 1704 | } |
| 1705 | |
| 1706 | int pages_after = new_capacity / Page::kPageSize; |
| 1707 | NewSpacePage* new_last_page = |
| 1708 | NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
| 1709 | new_last_page->set_next_page(anchor()); |
| 1710 | anchor()->set_prev_page(new_last_page); |
| 1711 | DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
| 1712 | } |
| 1713 | |
| 1714 | SetCapacity(new_capacity); |
| 1715 | |
| 1716 | return true; |
| 1717 | } |
| 1718 | |
| 1719 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 1720 | bool SemiSpace::SetTotalCapacity(int new_capacity) { |
| 1721 | CHECK(!is_committed()); |
| 1722 | if (new_capacity >= initial_total_capacity_ && |
| 1723 | new_capacity <= maximum_total_capacity_) { |
| 1724 | total_capacity_ = new_capacity; |
| 1725 | return true; |
| 1726 | } |
| 1727 | return false; |
| 1728 | } |
| 1729 | |
| 1730 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1731 | void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
| 1732 | anchor_.set_owner(this); |
| 1733 | // Fixup back-pointers to anchor. Address of anchor changes |
| 1734 | // when we swap. |
| 1735 | anchor_.prev_page()->set_next_page(&anchor_); |
| 1736 | anchor_.next_page()->set_prev_page(&anchor_); |
| 1737 | |
| 1738 | bool becomes_to_space = (id_ == kFromSpace); |
| 1739 | id_ = becomes_to_space ? kToSpace : kFromSpace; |
| 1740 | NewSpacePage* page = anchor_.next_page(); |
| 1741 | while (page != &anchor_) { |
| 1742 | page->set_owner(this); |
| 1743 | page->SetFlags(flags, mask); |
| 1744 | if (becomes_to_space) { |
| 1745 | page->ClearFlag(MemoryChunk::IN_FROM_SPACE); |
| 1746 | page->SetFlag(MemoryChunk::IN_TO_SPACE); |
| 1747 | page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1748 | page->ResetLiveBytes(); |
| 1749 | } else { |
| 1750 | page->SetFlag(MemoryChunk::IN_FROM_SPACE); |
| 1751 | page->ClearFlag(MemoryChunk::IN_TO_SPACE); |
| 1752 | } |
| 1753 | DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1754 | DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || |
| 1755 | page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); |
| 1756 | page = page->next_page(); |
| 1757 | } |
| 1758 | } |
| 1759 | |
| 1760 | |
| 1761 | void SemiSpace::Reset() { |
| 1762 | DCHECK(anchor_.next_page() != &anchor_); |
| 1763 | current_page_ = anchor_.next_page(); |
| 1764 | } |
| 1765 | |
| 1766 | |
| 1767 | void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { |
| 1768 | // We won't be swapping semispaces without data in them. |
| 1769 | DCHECK(from->anchor_.next_page() != &from->anchor_); |
| 1770 | DCHECK(to->anchor_.next_page() != &to->anchor_); |
| 1771 | |
| 1772 | // Swap bits. |
| 1773 | SemiSpace tmp = *from; |
| 1774 | *from = *to; |
| 1775 | *to = tmp; |
| 1776 | |
| 1777 | // Fixup back-pointers to the page list anchor now that its address |
| 1778 | // has changed. |
| 1779 | // Swap to/from-space bits on pages. |
| 1780 | // Copy GC flags from old active space (from-space) to new (to-space). |
| 1781 | intptr_t flags = from->current_page()->GetFlags(); |
| 1782 | to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); |
| 1783 | |
| 1784 | from->FlipPages(0, 0); |
| 1785 | } |
| 1786 | |
| 1787 | |
| 1788 | void SemiSpace::SetCapacity(int new_capacity) { |
| 1789 | total_capacity_ = new_capacity; |
| 1790 | if (total_capacity_ > maximum_committed_) { |
| 1791 | maximum_committed_ = total_capacity_; |
| 1792 | } |
| 1793 | } |
| 1794 | |
| 1795 | |
| 1796 | void SemiSpace::set_age_mark(Address mark) { |
| 1797 | DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this); |
| 1798 | age_mark_ = mark; |
| 1799 | // Mark all pages up to the one containing mark. |
| 1800 | NewSpacePageIterator it(space_start(), mark); |
| 1801 | while (it.has_next()) { |
| 1802 | it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1803 | } |
| 1804 | } |
| 1805 | |
| 1806 | |
| 1807 | #ifdef DEBUG |
| 1808 | void SemiSpace::Print() {} |
| 1809 | #endif |
| 1810 | |
| 1811 | #ifdef VERIFY_HEAP |
| 1812 | void SemiSpace::Verify() { |
| 1813 | bool is_from_space = (id_ == kFromSpace); |
| 1814 | NewSpacePage* page = anchor_.next_page(); |
| 1815 | CHECK(anchor_.semi_space() == this); |
| 1816 | while (page != &anchor_) { |
| 1817 | CHECK(page->semi_space() == this); |
| 1818 | CHECK(page->InNewSpace()); |
| 1819 | CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE |
| 1820 | : MemoryChunk::IN_TO_SPACE)); |
| 1821 | CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE |
| 1822 | : MemoryChunk::IN_FROM_SPACE)); |
| 1823 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); |
| 1824 | if (!is_from_space) { |
| 1825 | // The pointers-from-here-are-interesting flag isn't updated dynamically |
| 1826 | // on from-space pages, so it might be out of sync with the marking state. |
| 1827 | if (page->heap()->incremental_marking()->IsMarking()) { |
| 1828 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1829 | } else { |
| 1830 | CHECK( |
| 1831 | !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 1832 | } |
| 1833 | // TODO(gc): Check that the live_bytes_count_ field matches the |
| 1834 | // black marking on the page (if we make it match in new-space). |
| 1835 | } |
| 1836 | CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1837 | CHECK(page->prev_page()->next_page() == page); |
| 1838 | page = page->next_page(); |
| 1839 | } |
| 1840 | } |
| 1841 | #endif |
| 1842 | |
| 1843 | #ifdef DEBUG |
| 1844 | void SemiSpace::AssertValidRange(Address start, Address end) { |
| 1845 | // Addresses belong to same semi-space |
| 1846 | NewSpacePage* page = NewSpacePage::FromLimit(start); |
| 1847 | NewSpacePage* end_page = NewSpacePage::FromLimit(end); |
| 1848 | SemiSpace* space = page->semi_space(); |
| 1849 | CHECK_EQ(space, end_page->semi_space()); |
| 1850 | // Start address is before end address, either on same page, |
| 1851 | // or end address is on a later page in the linked list of |
| 1852 | // semi-space pages. |
| 1853 | if (page == end_page) { |
| 1854 | CHECK(start <= end); |
| 1855 | } else { |
| 1856 | while (page != end_page) { |
| 1857 | page = page->next_page(); |
| 1858 | CHECK_NE(page, space->anchor()); |
| 1859 | } |
| 1860 | } |
| 1861 | } |
| 1862 | #endif |
| 1863 | |
| 1864 | |
| 1865 | // ----------------------------------------------------------------------------- |
| 1866 | // SemiSpaceIterator implementation. |
| 1867 | SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { |
| 1868 | Initialize(space->bottom(), space->top(), NULL); |
| 1869 | } |
| 1870 | |
| 1871 | |
| 1872 | SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, |
| 1873 | HeapObjectCallback size_func) { |
| 1874 | Initialize(space->bottom(), space->top(), size_func); |
| 1875 | } |
| 1876 | |
| 1877 | |
| 1878 | SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { |
| 1879 | Initialize(start, space->top(), NULL); |
| 1880 | } |
| 1881 | |
| 1882 | |
| 1883 | SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { |
| 1884 | Initialize(from, to, NULL); |
| 1885 | } |
| 1886 | |
| 1887 | |
| 1888 | void SemiSpaceIterator::Initialize(Address start, Address end, |
| 1889 | HeapObjectCallback size_func) { |
| 1890 | SemiSpace::AssertValidRange(start, end); |
| 1891 | current_ = start; |
| 1892 | limit_ = end; |
| 1893 | size_func_ = size_func; |
| 1894 | } |
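| | // Typical use, mirroring NewSpace::CollectStatistics() below: |
| | // |
| | //   SemiSpaceIterator it(heap->new_space()); |
| | //   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| | //     // ... visit obj ... |
| | //   } |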
| 1895 | |
| 1896 | |
| 1897 | #ifdef DEBUG |
| 1898 | // heap_histograms is shared, always clear it before using it. |
| 1899 | static void ClearHistograms(Isolate* isolate) { |
| 1900 | // We reset the name each time, though it hasn't changed. |
| 1901 | #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); |
| 1902 | INSTANCE_TYPE_LIST(DEF_TYPE_NAME) |
| 1903 | #undef DEF_TYPE_NAME |
| 1904 | |
| 1905 | #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); |
| 1906 | INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) |
| 1907 | #undef CLEAR_HISTOGRAM |
| 1908 | |
| 1909 | isolate->js_spill_information()->Clear(); |
| 1910 | } |
| 1911 | |
| 1912 | |
| 1913 | static void ClearCodeKindStatistics(int* code_kind_statistics) { |
| 1914 | for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1915 | code_kind_statistics[i] = 0; |
| 1916 | } |
| 1917 | } |
| 1918 | |
| 1919 | |
| 1920 | static void ReportCodeKindStatistics(int* code_kind_statistics) { |
| 1921 | PrintF("\n Code kind histograms: \n"); |
| 1922 | for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 1923 | if (code_kind_statistics[i] > 0) { |
| 1924 | PrintF(" %-20s: %10d bytes\n", |
| 1925 | Code::Kind2String(static_cast<Code::Kind>(i)), |
| 1926 | code_kind_statistics[i]); |
| 1927 | } |
| 1928 | } |
| 1929 | PrintF("\n"); |
| 1930 | } |
| 1931 | |
| 1932 | |
| 1933 | static int CollectHistogramInfo(HeapObject* obj) { |
| 1934 | Isolate* isolate = obj->GetIsolate(); |
| 1935 | InstanceType type = obj->map()->instance_type(); |
| 1936 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 1937 | DCHECK(isolate->heap_histograms()[type].name() != NULL); |
| 1938 | isolate->heap_histograms()[type].increment_number(1); |
| 1939 | isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
| 1940 | |
| 1941 | if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
| 1942 | JSObject::cast(obj) |
| 1943 | ->IncrementSpillStatistics(isolate->js_spill_information()); |
| 1944 | } |
| 1945 | |
| 1946 | return obj->Size(); |
| 1947 | } |
| 1948 | |
| 1949 | |
| 1950 | static void ReportHistogram(Isolate* isolate, bool print_spill) { |
| 1951 | PrintF("\n Object Histogram:\n"); |
| 1952 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 1953 | if (isolate->heap_histograms()[i].number() > 0) { |
| 1954 | PrintF(" %-34s%10d (%10d bytes)\n", |
| 1955 | isolate->heap_histograms()[i].name(), |
| 1956 | isolate->heap_histograms()[i].number(), |
| 1957 | isolate->heap_histograms()[i].bytes()); |
| 1958 | } |
| 1959 | } |
| 1960 | PrintF("\n"); |
| 1961 | |
| 1962 | // Summarize string types. |
| 1963 | int string_number = 0; |
| 1964 | int string_bytes = 0; |
| 1965 | #define INCREMENT(type, size, name, camel_name) \ |
| 1966 | string_number += isolate->heap_histograms()[type].number(); \ |
| 1967 | string_bytes += isolate->heap_histograms()[type].bytes(); |
| 1968 | STRING_TYPE_LIST(INCREMENT) |
| 1969 | #undef INCREMENT |
| 1970 | if (string_number > 0) { |
| 1971 | PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, |
| 1972 | string_bytes); |
| 1973 | } |
| 1974 | |
| 1975 | if (FLAG_collect_heap_spill_statistics && print_spill) { |
| 1976 | isolate->js_spill_information()->Print(); |
| 1977 | } |
| 1978 | } |
| 1979 | #endif // DEBUG |
| 1980 | |
| 1981 | |
| 1982 | // Support for statistics gathering for --heap-stats and --log-gc. |
| 1983 | void NewSpace::ClearHistograms() { |
| 1984 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 1985 | allocated_histogram_[i].clear(); |
| 1986 | promoted_histogram_[i].clear(); |
| 1987 | } |
| 1988 | } |
| 1989 | |
| 1990 | |
| 1991 | // Because the copying collector does not touch garbage objects, we iterate |
| 1992 | // the new space before a collection to get a histogram of allocated objects. |
| 1993 | // This only happens when --log-gc flag is set. |
| 1994 | void NewSpace::CollectStatistics() { |
| 1995 | ClearHistograms(); |
| 1996 | SemiSpaceIterator it(this); |
| 1997 | for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) |
| 1998 | RecordAllocation(obj); |
| 1999 | } |
| 2000 | |
| 2001 | |
| 2002 | static void DoReportStatistics(Isolate* isolate, HistogramInfo* info, |
| 2003 | const char* description) { |
| 2004 | LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 2005 | // Lump all the string types together. |
| 2006 | int string_number = 0; |
| 2007 | int string_bytes = 0; |
| 2008 | #define INCREMENT(type, size, name, camel_name) \ |
| 2009 | string_number += info[type].number(); \ |
| 2010 | string_bytes += info[type].bytes(); |
| 2011 | STRING_TYPE_LIST(INCREMENT) |
| 2012 | #undef INCREMENT |
| 2013 | if (string_number > 0) { |
| 2014 | LOG(isolate, |
| 2015 | HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); |
| 2016 | } |
| 2017 | |
| 2018 | // Then do the other types. |
| 2019 | for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { |
| 2020 | if (info[i].number() > 0) { |
| 2021 | LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(), |
| 2022 | info[i].bytes())); |
| 2023 | } |
| 2024 | } |
| 2025 | LOG(isolate, HeapSampleEndEvent("NewSpace", description)); |
| 2026 | } |
| 2027 | |
| 2028 | |
| 2029 | void NewSpace::ReportStatistics() { |
| 2030 | #ifdef DEBUG |
| 2031 | if (FLAG_heap_stats) { |
| 2032 | float pct = static_cast<float>(Available()) / TotalCapacity(); |
| 2033 | PrintF(" capacity: %" V8_PTR_PREFIX |
| 2034 | "d" |
| 2035 | ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2036 | TotalCapacity(), Available(), static_cast<int>(pct * 100)); |
| 2037 | PrintF("\n Object Histogram:\n"); |
| 2038 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 2039 | if (allocated_histogram_[i].number() > 0) { |
| 2040 | PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(), |
| 2041 | allocated_histogram_[i].number(), |
| 2042 | allocated_histogram_[i].bytes()); |
| 2043 | } |
| 2044 | } |
| 2045 | PrintF("\n"); |
| 2046 | } |
| 2047 | #endif // DEBUG |
| 2048 | |
| 2049 | if (FLAG_log_gc) { |
| 2050 | Isolate* isolate = heap()->isolate(); |
| 2051 | DoReportStatistics(isolate, allocated_histogram_, "allocated"); |
| 2052 | DoReportStatistics(isolate, promoted_histogram_, "promoted"); |
| 2053 | } |
| 2054 | } |
| 2055 | |
| 2056 | |
| 2057 | void NewSpace::RecordAllocation(HeapObject* obj) { |
| 2058 | InstanceType type = obj->map()->instance_type(); |
| 2059 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 2060 | allocated_histogram_[type].increment_number(1); |
| 2061 | allocated_histogram_[type].increment_bytes(obj->Size()); |
| 2062 | } |
| 2063 | |
| 2064 | |
| 2065 | void NewSpace::RecordPromotion(HeapObject* obj) { |
| 2066 | InstanceType type = obj->map()->instance_type(); |
| 2067 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 2068 | promoted_histogram_[type].increment_number(1); |
| 2069 | promoted_histogram_[type].increment_bytes(obj->Size()); |
| 2070 | } |
| 2071 | |
| 2072 | |
| 2073 | size_t NewSpace::CommittedPhysicalMemory() { |
| 2074 | if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 2075 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 2076 | size_t size = to_space_.CommittedPhysicalMemory(); |
| 2077 | if (from_space_.is_committed()) { |
| 2078 | size += from_space_.CommittedPhysicalMemory(); |
| 2079 | } |
| 2080 | return size; |
| 2081 | } |
| 2082 | |
| 2083 | |
| 2084 | // ----------------------------------------------------------------------------- |
| 2085 | // Free lists for old object spaces implementation |
| 2086 | |
| 2087 | void FreeListNode::set_size(Heap* heap, int size_in_bytes) { |
| 2088 | DCHECK(size_in_bytes > 0); |
| 2089 | DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
| 2090 | |
| 2091 | // We write a map and possibly size information to the block. If the block |
| 2092 | // is big enough to be a FreeSpace with at least one extra word (the next |
| 2093 | // pointer), we set its map to be the free space map and its size to an |
| 2094 | // appropriate array length for the desired size from HeapObject::Size(). |
| 2095 |   // If the block is too small (e.g., one or two words) to hold both a size |
| 2096 | // field and a next pointer, we give it a filler map that gives it the |
| 2097 | // correct size. |
| 2098 | if (size_in_bytes > FreeSpace::kHeaderSize) { |
| 2099 | // Can't use FreeSpace::cast because it fails during deserialization. |
| 2100 | // We have to set the size first with a release store before we store |
| 2101 | // the map because a concurrent store buffer scan on scavenge must not |
| 2102 | // observe a map with an invalid size. |
| 2103 | FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); |
| 2104 | this_as_free_space->nobarrier_set_size(size_in_bytes); |
| 2105 | synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); |
| 2106 | } else if (size_in_bytes == kPointerSize) { |
| 2107 | set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); |
| 2108 | } else if (size_in_bytes == 2 * kPointerSize) { |
| 2109 | set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); |
| 2110 | } else { |
| 2111 | UNREACHABLE(); |
| 2112 | } |
| 2113 | // We would like to DCHECK(Size() == size_in_bytes) but this would fail during |
| 2114 | // deserialization because the free space map is not done yet. |
| 2115 | } |
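| | // The upshot is that a freed block always stays iterable: it is either a |
| | // real FreeSpace object carrying its own length, or a one- or two-word |
| | // filler whose map alone determines its size. |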
| 2116 | |
| 2117 | |
| 2118 | FreeListNode* FreeListNode::next() { |
| 2119 | DCHECK(IsFreeListNode(this)); |
| 2120 | if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
| 2121 | DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 2122 | return reinterpret_cast<FreeListNode*>( |
| 2123 | Memory::Address_at(address() + kNextOffset)); |
| 2124 | } else { |
| 2125 | return reinterpret_cast<FreeListNode*>( |
| 2126 | Memory::Address_at(address() + kPointerSize)); |
| 2127 | } |
| 2128 | } |
| 2129 | |
| 2130 | |
| 2131 | FreeListNode** FreeListNode::next_address() { |
| 2132 | DCHECK(IsFreeListNode(this)); |
| 2133 | if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
| 2134 | DCHECK(Size() >= kNextOffset + kPointerSize); |
| 2135 | return reinterpret_cast<FreeListNode**>(address() + kNextOffset); |
| 2136 | } else { |
| 2137 | return reinterpret_cast<FreeListNode**>(address() + kPointerSize); |
| 2138 | } |
| 2139 | } |
| 2140 | |
| 2141 | |
| 2142 | void FreeListNode::set_next(FreeListNode* next) { |
| 2143 | DCHECK(IsFreeListNode(this)); |
| 2144 | // While we are booting the VM the free space map will actually be null. So |
| 2145 | // we have to make sure that we don't try to use it for anything at that |
| 2146 | // stage. |
| 2147 | if (map() == GetHeap()->raw_unchecked_free_space_map()) { |
| 2148 | DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 2149 | base::NoBarrier_Store( |
| 2150 | reinterpret_cast<base::AtomicWord*>(address() + kNextOffset), |
| 2151 | reinterpret_cast<base::AtomicWord>(next)); |
| 2152 | } else { |
| 2153 | base::NoBarrier_Store( |
| 2154 | reinterpret_cast<base::AtomicWord*>(address() + kPointerSize), |
| 2155 | reinterpret_cast<base::AtomicWord>(next)); |
| 2156 | } |
| 2157 | } |
| 2158 | |
| 2159 | |
| 2160 | intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
| 2161 | intptr_t free_bytes = 0; |
| 2162 | if (category->top() != NULL) { |
| 2163 |     // Acquiring both locks here is safe (it cannot deadlock) because |
| 2164 |     // Concatenate operations are never performed concurrently on the same |
| 2165 |     // two free lists in opposite directions. |
| 2166 | base::LockGuard<base::Mutex> target_lock_guard(mutex()); |
| 2167 | base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); |
| 2168 | DCHECK(category->end_ != NULL); |
| 2169 | free_bytes = category->available(); |
| 2170 | if (end_ == NULL) { |
| 2171 | end_ = category->end(); |
| 2172 | } else { |
| 2173 | category->end()->set_next(top()); |
| 2174 | } |
| 2175 | set_top(category->top()); |
| 2176 | base::NoBarrier_Store(&top_, category->top_); |
| 2177 | available_ += category->available(); |
| 2178 | category->Reset(); |
| 2179 | } |
| 2180 | return free_bytes; |
| 2181 | } |
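| | // Lock-ordering note: the target category's mutex is always acquired |
| | // before the source's, so two concurrent Concatenate calls could only |
| | // deadlock if they operated on the same pair of lists in opposite |
| | // directions, which the comment above rules out. |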
| 2182 | |
| 2183 | |
| 2184 | void FreeListCategory::Reset() { |
| 2185 | set_top(NULL); |
| 2186 | set_end(NULL); |
| 2187 | set_available(0); |
| 2188 | } |
| 2189 | |
| 2190 | |
| 2191 | intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { |
| 2192 | int sum = 0; |
| 2193 | FreeListNode* t = top(); |
| 2194 | FreeListNode** n = &t; |
| 2195 | while (*n != NULL) { |
| 2196 | if (Page::FromAddress((*n)->address()) == p) { |
| 2197 | FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); |
| 2198 | sum += free_space->Size(); |
| 2199 | *n = (*n)->next(); |
| 2200 | } else { |
| 2201 | n = (*n)->next_address(); |
| 2202 | } |
| 2203 | } |
| 2204 | set_top(t); |
| 2205 | if (top() == NULL) { |
| 2206 | set_end(NULL); |
| 2207 | } |
| 2208 | available_ -= sum; |
| 2209 | return sum; |
| 2210 | } |
| 2211 | |
| 2212 | |
| 2213 | bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { |
| 2214 | FreeListNode* node = top(); |
| 2215 | while (node != NULL) { |
| 2216 | if (Page::FromAddress(node->address()) == p) return true; |
| 2217 | node = node->next(); |
| 2218 | } |
| 2219 | return false; |
| 2220 | } |
| 2221 | |
| 2222 | |
| 2223 | FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) { |
| 2224 | FreeListNode* node = top(); |
| 2225 | |
| 2226 | if (node == NULL) return NULL; |
| 2227 | |
| 2228 | while (node != NULL && |
| 2229 | Page::FromAddress(node->address())->IsEvacuationCandidate()) { |
| 2230 | available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); |
| 2231 | node = node->next(); |
| 2232 | } |
| 2233 | |
| 2234 | if (node != NULL) { |
| 2235 | set_top(node->next()); |
| 2236 | *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); |
| 2237 | available_ -= *node_size; |
| 2238 | } else { |
| 2239 | set_top(NULL); |
| 2240 | } |
| 2241 | |
| 2242 | if (top() == NULL) { |
| 2243 | set_end(NULL); |
| 2244 | } |
| 2245 | |
| 2246 | return node; |
| 2247 | } |
| 2248 | |
| 2249 | |
| 2250 | FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, |
| 2251 | int* node_size) { |
| 2252 | FreeListNode* node = PickNodeFromList(node_size); |
| 2253 | if (node != NULL && *node_size < size_in_bytes) { |
| 2254 | Free(node, *node_size); |
| 2255 | *node_size = 0; |
| 2256 | return NULL; |
| 2257 | } |
| 2258 | return node; |
| 2259 | } |
| 2260 | |
| 2261 | |
| 2262 | void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { |
| 2263 | node->set_next(top()); |
| 2264 | set_top(node); |
| 2265 | if (end_ == NULL) { |
| 2266 | end_ = node; |
| 2267 | } |
| 2268 | available_ += size_in_bytes; |
| 2269 | } |
| 2270 | |
| 2271 | |
| 2272 | void FreeListCategory::RepairFreeList(Heap* heap) { |
| 2273 | FreeListNode* n = top(); |
| 2274 | while (n != NULL) { |
| 2275 | Map** map_location = reinterpret_cast<Map**>(n->address()); |
| 2276 | if (*map_location == NULL) { |
| 2277 | *map_location = heap->free_space_map(); |
| 2278 | } else { |
| 2279 | DCHECK(*map_location == heap->free_space_map()); |
| 2280 | } |
| 2281 | n = n->next(); |
| 2282 | } |
| 2283 | } |
| 2284 | |
| 2285 | |
| 2286 | FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) { |
| 2287 | Reset(); |
| 2288 | } |
| 2289 | |
| 2290 | |
| 2291 | intptr_t FreeList::Concatenate(FreeList* free_list) { |
| 2292 | intptr_t free_bytes = 0; |
| 2293 | free_bytes += small_list_.Concatenate(free_list->small_list()); |
| 2294 | free_bytes += medium_list_.Concatenate(free_list->medium_list()); |
| 2295 | free_bytes += large_list_.Concatenate(free_list->large_list()); |
| 2296 | free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
| 2297 | return free_bytes; |
| 2298 | } |
| 2299 | |
| 2300 | |
| 2301 | void FreeList::Reset() { |
| 2302 | small_list_.Reset(); |
| 2303 | medium_list_.Reset(); |
| 2304 | large_list_.Reset(); |
| 2305 | huge_list_.Reset(); |
| 2306 | } |
| 2307 | |
| 2308 | |
| 2309 | int FreeList::Free(Address start, int size_in_bytes) { |
| 2310 | if (size_in_bytes == 0) return 0; |
| 2311 | |
| 2312 | FreeListNode* node = FreeListNode::FromAddress(start); |
| 2313 | node->set_size(heap_, size_in_bytes); |
| 2314 | Page* page = Page::FromAddress(start); |
| 2315 | |
| 2316 | // Early return to drop too-small blocks on the floor. |
| 2317 | if (size_in_bytes < kSmallListMin) { |
| 2318 | page->add_non_available_small_blocks(size_in_bytes); |
| 2319 | return size_in_bytes; |
| 2320 | } |
| 2321 | |
| 2322 | // Insert other blocks at the head of a free list of the appropriate |
| 2323 | // magnitude. |
| 2324 | if (size_in_bytes <= kSmallListMax) { |
| 2325 | small_list_.Free(node, size_in_bytes); |
| 2326 | page->add_available_in_small_free_list(size_in_bytes); |
| 2327 | } else if (size_in_bytes <= kMediumListMax) { |
| 2328 | medium_list_.Free(node, size_in_bytes); |
| 2329 | page->add_available_in_medium_free_list(size_in_bytes); |
| 2330 | } else if (size_in_bytes <= kLargeListMax) { |
| 2331 | large_list_.Free(node, size_in_bytes); |
| 2332 | page->add_available_in_large_free_list(size_in_bytes); |
| 2333 | } else { |
| 2334 | huge_list_.Free(node, size_in_bytes); |
| 2335 | page->add_available_in_huge_free_list(size_in_bytes); |
| 2336 | } |
| 2337 | |
| 2338 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2339 | return 0; |
| 2340 | } |
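| | // The return value is the number of freed bytes that could not be put on |
| | // any list: size_in_bytes for blocks below kSmallListMin, 0 otherwise. |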
| 2341 | |
| 2342 | |
| 2343 | FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { |
| 2344 | FreeListNode* node = NULL; |
| 2345 | Page* page = NULL; |
| 2346 | |
| 2347 | if (size_in_bytes <= kSmallAllocationMax) { |
| 2348 | node = small_list_.PickNodeFromList(node_size); |
| 2349 | if (node != NULL) { |
| 2350 | DCHECK(size_in_bytes <= *node_size); |
| 2351 | page = Page::FromAddress(node->address()); |
| 2352 | page->add_available_in_small_free_list(-(*node_size)); |
| 2353 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2354 | return node; |
| 2355 | } |
| 2356 | } |
| 2357 | |
| 2358 | if (size_in_bytes <= kMediumAllocationMax) { |
| 2359 | node = medium_list_.PickNodeFromList(node_size); |
| 2360 | if (node != NULL) { |
| 2361 | DCHECK(size_in_bytes <= *node_size); |
| 2362 | page = Page::FromAddress(node->address()); |
| 2363 | page->add_available_in_medium_free_list(-(*node_size)); |
| 2364 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2365 | return node; |
| 2366 | } |
| 2367 | } |
| 2368 | |
| 2369 | if (size_in_bytes <= kLargeAllocationMax) { |
| 2370 | node = large_list_.PickNodeFromList(node_size); |
| 2371 | if (node != NULL) { |
| 2372 | DCHECK(size_in_bytes <= *node_size); |
| 2373 | page = Page::FromAddress(node->address()); |
| 2374 | page->add_available_in_large_free_list(-(*node_size)); |
| 2375 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2376 | return node; |
| 2377 | } |
| 2378 | } |
| 2379 | |
| 2380 | int huge_list_available = huge_list_.available(); |
| 2381 | FreeListNode* top_node = huge_list_.top(); |
| 2382 | for (FreeListNode** cur = &top_node; *cur != NULL; |
| 2383 | cur = (*cur)->next_address()) { |
| 2384 | FreeListNode* cur_node = *cur; |
| 2385 | while (cur_node != NULL && |
| 2386 | Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { |
| 2387 | int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); |
| 2388 | huge_list_available -= size; |
| 2389 | page = Page::FromAddress(cur_node->address()); |
| 2390 | page->add_available_in_huge_free_list(-size); |
| 2391 | cur_node = cur_node->next(); |
| 2392 | } |
| 2393 | |
| 2394 | *cur = cur_node; |
| 2395 | if (cur_node == NULL) { |
| 2396 | huge_list_.set_end(NULL); |
| 2397 | break; |
| 2398 | } |
| 2399 | |
| 2400 | DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map()); |
| 2401 | FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); |
| 2402 | int size = cur_as_free_space->Size(); |
| 2403 | if (size >= size_in_bytes) { |
| 2404 | // Large enough node found. Unlink it from the list. |
| 2405 | node = *cur; |
| 2406 | *cur = node->next(); |
| 2407 | *node_size = size; |
| 2408 | huge_list_available -= size; |
| 2409 | page = Page::FromAddress(node->address()); |
| 2410 | page->add_available_in_huge_free_list(-size); |
| 2411 | break; |
| 2412 | } |
| 2413 | } |
| 2414 | |
| 2415 | huge_list_.set_top(top_node); |
| 2416 | if (huge_list_.top() == NULL) { |
| 2417 | huge_list_.set_end(NULL); |
| 2418 | } |
| 2419 | huge_list_.set_available(huge_list_available); |
| 2420 | |
| 2421 | if (node != NULL) { |
| 2422 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2423 | return node; |
| 2424 | } |
| 2425 | |
| 2426 | if (size_in_bytes <= kSmallListMax) { |
| 2427 | node = small_list_.PickNodeFromList(size_in_bytes, node_size); |
| 2428 | if (node != NULL) { |
| 2429 | DCHECK(size_in_bytes <= *node_size); |
| 2430 | page = Page::FromAddress(node->address()); |
| 2431 | page->add_available_in_small_free_list(-(*node_size)); |
| 2432 | } |
| 2433 | } else if (size_in_bytes <= kMediumListMax) { |
| 2434 | node = medium_list_.PickNodeFromList(size_in_bytes, node_size); |
| 2435 | if (node != NULL) { |
| 2436 | DCHECK(size_in_bytes <= *node_size); |
| 2437 | page = Page::FromAddress(node->address()); |
| 2438 | page->add_available_in_medium_free_list(-(*node_size)); |
| 2439 | } |
| 2440 | } else if (size_in_bytes <= kLargeListMax) { |
| 2441 | node = large_list_.PickNodeFromList(size_in_bytes, node_size); |
| 2442 | if (node != NULL) { |
| 2443 | DCHECK(size_in_bytes <= *node_size); |
| 2444 | page = Page::FromAddress(node->address()); |
| 2445 | page->add_available_in_large_free_list(-(*node_size)); |
| 2446 | } |
| 2447 | } |
| 2448 | |
| 2449 | DCHECK(IsVeryLong() || available() == SumFreeLists()); |
| 2450 | return node; |
| 2451 | } |
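| | // The search order above is: first a size class whose nodes are always at |
| | // least as large as the request (hence the DCHECKs on *node_size), then a |
| | // scan of the huge list for the first sufficiently large node, and finally |
| | // the size-checked PickNodeFromList() overload, which hands out the head |
| | // node only if it is large enough and otherwise puts it back and returns |
| | // NULL. |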
| 2452 | |
| 2453 | |
| 2454 | // Allocation on the old space free list. If it succeeds then a new linear |
| 2455 | // allocation space has been set up with the top and limit of the space. If |
| 2456 | // the allocation fails then NULL is returned, and the caller can perform a GC |
| 2457 | // or allocate a new page before retrying. |
| 2458 | HeapObject* FreeList::Allocate(int size_in_bytes) { |
| 2459 | DCHECK(0 < size_in_bytes); |
| 2460 | DCHECK(size_in_bytes <= kMaxBlockSize); |
| 2461 | DCHECK(IsAligned(size_in_bytes, kPointerSize)); |
| 2462 | // Don't free list allocate if there is linear space available. |
| 2463 | DCHECK(owner_->limit() - owner_->top() < size_in_bytes); |
| 2464 | |
| 2465 | int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); |
| 2466 | // Mark the old linear allocation area with a free space map so it can be |
| 2467 | // skipped when scanning the heap. This also puts it back in the free list |
| 2468 | // if it is big enough. |
| 2469 | owner_->Free(owner_->top(), old_linear_size); |
| 2470 | |
| 2471 | owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes - |
| 2472 | old_linear_size); |
| 2473 | |
| 2474 | int new_node_size = 0; |
| 2475 | FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 2476 | if (new_node == NULL) { |
| 2477 | owner_->SetTopAndLimit(NULL, NULL); |
| 2478 | return NULL; |
| 2479 | } |
| 2480 | |
| 2481 | int bytes_left = new_node_size - size_in_bytes; |
| 2482 | DCHECK(bytes_left >= 0); |
| 2483 | |
| 2484 | #ifdef DEBUG |
| 2485 | for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
| 2486 | reinterpret_cast<Object**>(new_node->address())[i] = |
| 2487 | Smi::FromInt(kCodeZapValue); |
| 2488 | } |
| 2489 | #endif |
| 2490 | |
| 2491 | // The old-space-step might have finished sweeping and restarted marking. |
| 2492 | // Verify that it did not turn the page of the new node into an evacuation |
| 2493 | // candidate. |
| 2494 | DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
| 2495 | |
| 2496 | const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 2497 | |
| 2498 | // Memory in the linear allocation area is counted as allocated. We may free |
| 2499 | // a little of this again immediately - see below. |
| 2500 | owner_->Allocate(new_node_size); |
| 2501 | |
| 2502 | if (owner_->heap()->inline_allocation_disabled()) { |
| 2503 |     // Keep the linear allocation area empty if requested to do so; just |
| 2504 |     // return the area to the free list instead. |
| 2505 | owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
| 2506 | DCHECK(owner_->top() == NULL && owner_->limit() == NULL); |
| 2507 | } else if (bytes_left > kThreshold && |
| 2508 | owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| 2509 | FLAG_incremental_marking_steps) { |
| 2510 | int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 2511 | // We don't want to give too large linear areas to the allocator while |
| 2512 | // incremental marking is going on, because we won't check again whether |
| 2513 | // we want to do another increment until the linear area is used up. |
| 2514 | owner_->Free(new_node->address() + size_in_bytes + linear_size, |
| 2515 | new_node_size - size_in_bytes - linear_size); |
| 2516 | owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2517 | new_node->address() + size_in_bytes + linear_size); |
| 2518 | } else if (bytes_left > 0) { |
| 2519 | // Normally we give the rest of the node to the allocator as its new |
| 2520 | // linear allocation area. |
| 2521 | owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2522 | new_node->address() + new_node_size); |
| 2523 | } else { |
| 2524 |     // TODO(gc): Try not freeing the linear allocation region when bytes_left |
| 2525 |     // is zero. |
| 2526 | owner_->SetTopAndLimit(NULL, NULL); |
| 2527 | } |
| 2528 | |
| 2529 | return new_node; |
| 2530 | } |
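| | // Note on the linear area handed back above: normally the unused tail of |
| | // the node becomes the new top/limit range, but while incremental marking |
| | // is unfinished and the tail exceeds kAllocatedThreshold, the linear area |
| | // is capped near that threshold and the rest is immediately re-freed, so |
| | // marking gets a step at least once per threshold worth of allocation. |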
| 2531 | |
| 2532 | |
| 2533 | intptr_t FreeList::EvictFreeListItems(Page* p) { |
| 2534 | intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
| 2535 | p->set_available_in_huge_free_list(0); |
| 2536 | |
| 2537 | if (sum < p->area_size()) { |
| 2538 | sum += small_list_.EvictFreeListItemsInList(p) + |
| 2539 | medium_list_.EvictFreeListItemsInList(p) + |
| 2540 | large_list_.EvictFreeListItemsInList(p); |
| 2541 | p->set_available_in_small_free_list(0); |
| 2542 | p->set_available_in_medium_free_list(0); |
| 2543 | p->set_available_in_large_free_list(0); |
| 2544 | } |
| 2545 | |
| 2546 | return sum; |
| 2547 | } |
| 2548 | |
| 2549 | |
| 2550 | bool FreeList::ContainsPageFreeListItems(Page* p) { |
| 2551 |   return huge_list_.ContainsPageFreeListItemsInList(p) || |
| 2552 |          small_list_.ContainsPageFreeListItemsInList(p) || |
| 2553 |          medium_list_.ContainsPageFreeListItemsInList(p) || |
| 2554 |          large_list_.ContainsPageFreeListItemsInList(p); |
| 2555 | } |
| 2556 | |
| 2557 | |
| 2558 | void FreeList::RepairLists(Heap* heap) { |
| 2559 | small_list_.RepairFreeList(heap); |
| 2560 | medium_list_.RepairFreeList(heap); |
| 2561 | large_list_.RepairFreeList(heap); |
| 2562 | huge_list_.RepairFreeList(heap); |
| 2563 | } |
| 2564 | |
| 2565 | |
| 2566 | #ifdef DEBUG |
| 2567 | intptr_t FreeListCategory::SumFreeList() { |
| 2568 | intptr_t sum = 0; |
| 2569 | FreeListNode* cur = top(); |
| 2570 | while (cur != NULL) { |
| 2571 | DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); |
| 2572 | FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); |
| 2573 | sum += cur_as_free_space->nobarrier_size(); |
| 2574 | cur = cur->next(); |
| 2575 | } |
| 2576 | return sum; |
| 2577 | } |
| 2578 | |
| 2579 | |
| 2580 | static const int kVeryLongFreeList = 500; |
| 2581 | |
| 2582 | |
| 2583 | int FreeListCategory::FreeListLength() { |
| 2584 | int length = 0; |
| 2585 | FreeListNode* cur = top(); |
| 2586 | while (cur != NULL) { |
| 2587 | length++; |
| 2588 | cur = cur->next(); |
| 2589 | if (length == kVeryLongFreeList) return length; |
| 2590 | } |
| 2591 | return length; |
| 2592 | } |
| 2593 | |
| 2594 | |
| 2595 | bool FreeList::IsVeryLong() { |
| 2596 | if (small_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2597 | if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2598 | if (large_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2599 | if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; |
| 2600 | return false; |
| 2601 | } |
| 2602 | |
| 2603 | |
| 2604 | // This can take a very long time because it is linear in the number of entries |
| 2605 | // on the free list, so it should not be called if FreeListLength returns |
| 2606 | // kVeryLongFreeList. |
| 2607 | intptr_t FreeList::SumFreeLists() { |
| 2608 | intptr_t sum = small_list_.SumFreeList(); |
| 2609 | sum += medium_list_.SumFreeList(); |
| 2610 | sum += large_list_.SumFreeList(); |
| 2611 | sum += huge_list_.SumFreeList(); |
| 2612 | return sum; |
| 2613 | } |
| 2614 | #endif |
| 2615 | |
| 2616 | |
| 2617 | // ----------------------------------------------------------------------------- |
| 2618 | // OldSpace implementation |
| 2619 | |
| 2620 | void PagedSpace::PrepareForMarkCompact() { |
| 2621 | // We don't have a linear allocation area while sweeping. It will be restored |
| 2622 | // on the first allocation after the sweep. |
| 2623 | EmptyAllocationInfo(); |
| 2624 | |
| 2625 | // This counter will be increased for pages which will be swept by the |
| 2626 | // sweeper threads. |
| 2627 | unswept_free_bytes_ = 0; |
| 2628 | |
| 2629 | // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2630 | free_list_.Reset(); |
| 2631 | } |
| 2632 | |
| 2633 | |
| 2634 | intptr_t PagedSpace::SizeOfObjects() { |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame^] | 2635 | DCHECK(!FLAG_concurrent_sweeping || |
| 2636 | heap()->mark_compact_collector()->sweeping_in_progress() || |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2637 | (unswept_free_bytes_ == 0)); |
| 2638 | return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2639 | } |
| 2640 | |
| 2641 | |
| 2642 | // After we have booted, we have created a map which represents free space |
| 2643 | // on the heap. If there was already a free list then the elements on it |
| 2644 | // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2645 | // fix them. |
| 2646 | void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); } |
| 2647 | |
| 2648 | |
| 2649 | void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { |
| 2650 | if (allocation_info_.top() >= allocation_info_.limit()) return; |
| 2651 | |
| 2652 | if (Page::FromAllocationTop(allocation_info_.top()) |
| 2653 | ->IsEvacuationCandidate()) { |
| 2654 | // Create filler object to keep page iterable if it was iterable. |
| 2655 | int remaining = |
| 2656 | static_cast<int>(allocation_info_.limit() - allocation_info_.top()); |
| 2657 | heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); |
| 2658 | |
| 2659 | allocation_info_.set_top(NULL); |
| 2660 | allocation_info_.set_limit(NULL); |
| 2661 | } |
| 2662 | } |
| 2663 | |
| 2664 | |
| 2665 | HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( |
| 2666 | int size_in_bytes) { |
| 2667 | MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2668 | if (collector->sweeping_in_progress()) { |
| 2669 | // Wait for the sweeper threads here and complete the sweeping phase. |
| 2670 | collector->EnsureSweepingCompleted(); |
| 2671 | |
| 2672 | // After waiting for the sweeper threads, there may be new free-list |
| 2673 | // entries. |
| 2674 | return free_list_.Allocate(size_in_bytes); |
| 2675 | } |
| 2676 | return NULL; |
| 2677 | } |
| 2678 | |
| 2679 | |
| 2680 | HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2681 | // Allocation in this space has failed. |
| 2682 | |
| 2683 | MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2684 | // Sweeping is still in progress. |
| 2685 | if (collector->sweeping_in_progress()) { |
| 2686 | // First try to refill the free-list, concurrent sweeper threads |
| 2687 | // may have freed some objects in the meantime. |
| 2688 | collector->RefillFreeList(this); |
| 2689 | |
| 2690 | // Retry the free list allocation. |
| 2691 | HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2692 | if (object != NULL) return object; |
| 2693 | |
| 2694 | // If sweeping is still in progress try to sweep pages on the main thread. |
| 2695 | int free_chunk = collector->SweepInParallel(this, size_in_bytes); |
| 2696 | collector->RefillFreeList(this); |
| 2697 | if (free_chunk >= size_in_bytes) { |
| 2698 | HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2699 | // We should be able to allocate an object here since we just freed that |
| 2700 | // much memory. |
| 2701 | DCHECK(object != NULL); |
| 2702 | if (object != NULL) return object; |
| 2703 | } |
| 2704 | } |
| 2705 | |
| 2706 | // Free list allocation failed and there is no next page. Fail if we have |
| 2707 | // hit the old generation size limit that should cause a garbage |
| 2708 | // collection. |
| 2709 | if (!heap()->always_allocate() && |
| 2710 | heap()->OldGenerationAllocationLimitReached()) { |
| 2711 | // If sweeper threads are active, wait for them at that point and steal |
| 2712 |     // elements from their free-lists. |
| 2713 | HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
| 2714 | if (object != NULL) return object; |
| 2715 | } |
| 2716 | |
| 2717 | // Try to expand the space and allocate in the new next page. |
| 2718 | if (Expand()) { |
| 2719 | DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); |
| 2720 | return free_list_.Allocate(size_in_bytes); |
| 2721 | } |
| 2722 | |
| 2723 | // If sweeper threads are active, wait for them at that point and steal |
| 2724 |   // elements from their free-lists. Allocation may still fail then, which |
| 2725 | // would indicate that there is not enough memory for the given allocation. |
| 2726 | return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); |
| 2727 | } |
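| | // Summary of the fallback order above: refill the free list from the |
| | // concurrent sweepers, sweep a chunk on the main thread, optionally wait |
| | // for sweeping to finish once the old-generation limit is hit, expand the |
| | // space, and finally wait for the sweepers one last time before giving up. |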
| 2728 | |
| 2729 | |
| 2730 | #ifdef DEBUG |
| 2731 | void PagedSpace::ReportCodeStatistics(Isolate* isolate) { |
| 2732 | CommentStatistic* comments_statistics = |
| 2733 | isolate->paged_space_comments_statistics(); |
| 2734 | ReportCodeKindStatistics(isolate->code_kind_statistics()); |
| 2735 | PrintF( |
| 2736 | "Code comment statistics (\" [ comment-txt : size/ " |
| 2737 | "count (average)\"):\n"); |
| 2738 | for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { |
| 2739 | const CommentStatistic& cs = comments_statistics[i]; |
| 2740 | if (cs.size > 0) { |
| 2741 | PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, |
| 2742 | cs.size / cs.count); |
| 2743 | } |
| 2744 | } |
| 2745 | PrintF("\n"); |
| 2746 | } |
| 2747 | |
| 2748 | |
| 2749 | void PagedSpace::ResetCodeStatistics(Isolate* isolate) { |
| 2750 | CommentStatistic* comments_statistics = |
| 2751 | isolate->paged_space_comments_statistics(); |
| 2752 | ClearCodeKindStatistics(isolate->code_kind_statistics()); |
| 2753 | for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 2754 | comments_statistics[i].Clear(); |
| 2755 | } |
| 2756 | comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; |
| 2757 | comments_statistics[CommentStatistic::kMaxComments].size = 0; |
| 2758 | comments_statistics[CommentStatistic::kMaxComments].count = 0; |
| 2759 | } |
| 2760 | |
| 2761 | |
| 2762 | // Adds a comment to the 'comments_statistics' table. Performance is OK as
| 2763 | // long as 'kMaxComments' is small; overflow goes to the "Unknown" entry.
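// Illustrative use (the comment strings and sizes are made up):
//   EnterComment(isolate, "LoadIC", 12);
//   EnterComment(isolate, "LoadIC", 8);
//   EnterComment(isolate, "StoreIC", 4);
// leaves "LoadIC" with size 20 and count 2, and "StoreIC" with size 4 and
// count 1, so ReportCodeStatistics() prints averages of 10 and 4.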
| 2764 | static void EnterComment(Isolate* isolate, const char* comment, int delta) { |
| 2765 | CommentStatistic* comments_statistics = |
| 2766 | isolate->paged_space_comments_statistics(); |
| 2767 | // Do not count empty comments |
| 2768 | if (delta <= 0) return; |
| 2769 | CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; |
| 2770 | // Search for a free or matching entry in 'comments_statistics': 'cs' |
| 2771 | // points to result. |
| 2772 | for (int i = 0; i < CommentStatistic::kMaxComments; i++) { |
| 2773 | if (comments_statistics[i].comment == NULL) { |
| 2774 | cs = &comments_statistics[i]; |
| 2775 | cs->comment = comment; |
| 2776 | break; |
| 2777 | } else if (strcmp(comments_statistics[i].comment, comment) == 0) { |
| 2778 | cs = &comments_statistics[i]; |
| 2779 | break; |
| 2780 | } |
| 2781 | } |
| 2782 | // Update entry for 'comment' |
| 2783 | cs->size += delta; |
| 2784 | cs->count += 1; |
| 2785 | } |
| 2786 | |
| 2787 | |
| 2788 | // Called for each nested comment start (start marked with '[ xxx', end marked
| 2789 | // with ']'). RelocIterator 'it' must point to a comment reloc info.
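// Illustrative layout (the comment texts are made up): for reloc comments
//   "[ outer"  <code A>  "[ inner"  <code B>  "]"  <code C>  "]"
// the size of <code B> is attributed to "[ inner", while the sizes of
// <code A> and <code C> (but not <code B>) are attributed to "[ outer".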
| 2790 | static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { |
| 2791 | DCHECK(!it->done()); |
| 2792 | DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT); |
| 2793 | const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2794 | if (tmp[0] != '[') { |
| 2795 | // Not a nested comment; skip |
| 2796 | return; |
| 2797 | } |
| 2798 | |
| 2799 | // Search for end of nested comment or a new nested comment |
| 2800 | const char* const comment_txt = |
| 2801 | reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2802 | const byte* prev_pc = it->rinfo()->pc(); |
| 2803 | int flat_delta = 0; |
| 2804 | it->next(); |
| 2805 | while (true) { |
| 2806 |     // All nested comments must be terminated properly, so this loop is
| 2807 |     // guaranteed to exit.
| 2808 | DCHECK(!it->done()); |
| 2809 | if (it->rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2810 | const char* const txt = |
| 2811 | reinterpret_cast<const char*>(it->rinfo()->data()); |
| 2812 | flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); |
| 2813 | if (txt[0] == ']') break; // End of nested comment |
| 2814 | // A new comment |
| 2815 | CollectCommentStatistics(isolate, it); |
| 2816 |       // Skip code that was covered by the previous comment.
| 2817 | prev_pc = it->rinfo()->pc(); |
| 2818 | } |
| 2819 | it->next(); |
| 2820 | } |
| 2821 | EnterComment(isolate, comment_txt, flat_delta); |
| 2822 | } |
| 2823 | |
| 2824 | |
| 2825 | // Collects code size statistics: |
| 2826 | // - by code kind |
| 2827 | // - by code comment |
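// Instruction bytes that are not covered by any comment are attributed to
// the pseudo-comment "NoComment".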
| 2828 | void PagedSpace::CollectCodeStatistics() { |
| 2829 | Isolate* isolate = heap()->isolate(); |
| 2830 | HeapObjectIterator obj_it(this); |
| 2831 | for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 2832 | if (obj->IsCode()) { |
| 2833 | Code* code = Code::cast(obj); |
| 2834 | isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 2835 | RelocIterator it(code); |
| 2836 | int delta = 0; |
| 2837 | const byte* prev_pc = code->instruction_start(); |
| 2838 | while (!it.done()) { |
| 2839 | if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
| 2840 | delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
| 2841 | CollectCommentStatistics(isolate, &it); |
| 2842 | prev_pc = it.rinfo()->pc(); |
| 2843 | } |
| 2844 | it.next(); |
| 2845 | } |
| 2846 | |
| 2847 | DCHECK(code->instruction_start() <= prev_pc && |
| 2848 | prev_pc <= code->instruction_end()); |
| 2849 | delta += static_cast<int>(code->instruction_end() - prev_pc); |
| 2850 | EnterComment(isolate, "NoComment", delta); |
| 2851 | } |
| 2852 | } |
| 2853 | } |
| 2854 | |
| 2855 | |
| 2856 | void PagedSpace::ReportStatistics() { |
| 2857 | int pct = static_cast<int>(Available() * 100 / Capacity()); |
| 2858 | PrintF(" capacity: %" V8_PTR_PREFIX |
| 2859 | "d" |
| 2860 | ", waste: %" V8_PTR_PREFIX |
| 2861 | "d" |
| 2862 | ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2863 | Capacity(), Waste(), Available(), pct); |
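// The statement above prints, for example (made-up numbers):
//   capacity: 8388608, waste: 1024, available: 4194304, %50
// where the trailing %50 is available memory as a percentage of capacity.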
| 2864 | |
| 2865 | if (heap()->mark_compact_collector()->sweeping_in_progress()) { |
| 2866 | heap()->mark_compact_collector()->EnsureSweepingCompleted(); |
| 2867 | } |
| 2868 | ClearHistograms(heap()->isolate()); |
| 2869 | HeapObjectIterator obj_it(this); |
| 2870 | for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) |
| 2871 | CollectHistogramInfo(obj); |
| 2872 | ReportHistogram(heap()->isolate(), true); |
| 2873 | } |
| 2874 | #endif |
| 2875 | |
| 2876 | |
| 2877 | // ----------------------------------------------------------------------------- |
| 2878 | // MapSpace implementation |
| 2879 | // TODO(mvstanton): this is weird...the compiler can't make a vtable unless |
| 2880 | // there is at least one non-inlined virtual function. I would prefer to hide |
| 2881 | // the VerifyObject definition behind VERIFY_HEAP. |
| 2882 | |
| 2883 | void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); } |
| 2884 | |
| 2885 | |
| 2886 | // ----------------------------------------------------------------------------- |
| 2887 | // CellSpace and PropertyCellSpace implementation |
| 2888 | // TODO(mvstanton): this is weird...the compiler can't make a vtable unless |
| 2889 | // there is at least one non-inlined virtual function. I would prefer to hide |
| 2890 | // the VerifyObject definition behind VERIFY_HEAP. |
| 2891 | |
| 2892 | void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); } |
| 2893 | |
| 2894 | |
| 2895 | void PropertyCellSpace::VerifyObject(HeapObject* object) { |
| 2896 | CHECK(object->IsPropertyCell()); |
| 2897 | } |
| 2898 | |
| 2899 | |
| 2900 | // ----------------------------------------------------------------------------- |
| 2901 | // LargeObjectIterator |
| 2902 | |
| 2903 | LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { |
| 2904 | current_ = space->first_page_; |
| 2905 | size_func_ = NULL; |
| 2906 | } |
| 2907 | |
| 2908 | |
| 2909 | LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, |
| 2910 | HeapObjectCallback size_func) { |
| 2911 | current_ = space->first_page_; |
| 2912 | size_func_ = size_func; |
| 2913 | } |
| 2914 | |
| 2915 | |
| 2916 | HeapObject* LargeObjectIterator::Next() { |
| 2917 | if (current_ == NULL) return NULL; |
| 2918 | |
| 2919 | HeapObject* object = current_->GetObject(); |
| 2920 | current_ = current_->next_page(); |
| 2921 | return object; |
| 2922 | } |
| 2923 | |
| 2924 | |
| 2925 | // ----------------------------------------------------------------------------- |
| 2926 | // LargeObjectSpace |
| 2927 | static bool ComparePointers(void* key1, void* key2) { return key1 == key2; } |
| 2928 | |
| 2929 | |
| 2930 | LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity, |
| 2931 | AllocationSpace id) |
| 2932 | : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
| 2933 | max_capacity_(max_capacity), |
| 2934 | first_page_(NULL), |
| 2935 | size_(0), |
| 2936 | page_count_(0), |
| 2937 | objects_size_(0), |
| 2938 | chunk_map_(ComparePointers, 1024) {} |
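// chunk_map_ maps each MemoryChunk::kAlignment-aligned chunk index
// (address / kAlignment) covered by a large page to that page. Entries are
// added in AllocateRaw(), looked up in FindPage() and removed in
// FreeUnmarkedObjects() below.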
| 2939 | |
| 2940 | |
| 2941 | bool LargeObjectSpace::SetUp() { |
| 2942 | first_page_ = NULL; |
| 2943 | size_ = 0; |
| 2944 | maximum_committed_ = 0; |
| 2945 | page_count_ = 0; |
| 2946 | objects_size_ = 0; |
| 2947 | chunk_map_.Clear(); |
| 2948 | return true; |
| 2949 | } |
| 2950 | |
| 2951 | |
| 2952 | void LargeObjectSpace::TearDown() { |
| 2953 | while (first_page_ != NULL) { |
| 2954 | LargePage* page = first_page_; |
| 2955 | first_page_ = first_page_->next_page(); |
| 2956 | LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); |
| 2957 | |
| 2958 | ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); |
| 2959 | heap()->isolate()->memory_allocator()->PerformAllocationCallback( |
| 2960 | space, kAllocationActionFree, page->size()); |
| 2961 | heap()->isolate()->memory_allocator()->Free(page); |
| 2962 | } |
| 2963 | SetUp(); |
| 2964 | } |
| 2965 | |
| 2966 | |
| 2967 | AllocationResult LargeObjectSpace::AllocateRaw(int object_size, |
| 2968 | Executability executable) { |
| 2969 | // Check if we want to force a GC before growing the old space further. |
| 2970 | // If so, fail the allocation. |
| 2971 | if (!heap()->always_allocate() && |
| 2972 | heap()->OldGenerationAllocationLimitReached()) { |
| 2973 | return AllocationResult::Retry(identity()); |
| 2974 | } |
| 2975 | |
| 2976 |   if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
| 2977 |
| 2978 | LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage( |
| 2979 | object_size, this, executable); |
| 2980 | if (page == NULL) return AllocationResult::Retry(identity()); |
| 2981 | DCHECK(page->area_size() >= object_size); |
| 2982 | |
| 2983 | size_ += static_cast<int>(page->size()); |
| 2984 | objects_size_ += object_size; |
| 2985 | page_count_++; |
| 2986 | page->set_next_page(first_page_); |
| 2987 | first_page_ = page; |
| 2988 | |
| 2989 | if (size_ > maximum_committed_) { |
| 2990 | maximum_committed_ = size_; |
| 2991 | } |
| 2992 | |
| 2993 | // Register all MemoryChunk::kAlignment-aligned chunks covered by |
| 2994 | // this large page in the chunk map. |
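// Illustrative key computation, assuming the usual 1 MB chunk alignment
// (the addresses are made up): a large page at 0x40000000 with a size of
// 3 MB yields base 0x400 and limit 0x402, so the keys 0x400, 0x401 and
// 0x402 all map to this page, and FindPage() can then locate it for any
// interior address with a single hash lookup.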
| 2995 | uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; |
| 2996 | uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; |
| 2997 | for (uintptr_t key = base; key <= limit; key++) { |
| 2998 | HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 2999 | static_cast<uint32_t>(key), true); |
| 3000 | DCHECK(entry != NULL); |
| 3001 | entry->value = page; |
| 3002 | } |
| 3003 | |
| 3004 | HeapObject* object = page->GetObject(); |
| 3005 | |
| 3006 | MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size); |
| 3007 | |
| 3008 | if (Heap::ShouldZapGarbage()) { |
| 3009 | // Make the object consistent so the heap can be verified in OldSpaceStep. |
| 3010 | // We only need to do this in debug builds or if verify_heap is on. |
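// With the FixedArray map in the first word and a Smi length of zero in the
// second, the object header effectively reads as an empty FixedArray.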
| 3011 | reinterpret_cast<Object**>(object->address())[0] = |
| 3012 | heap()->fixed_array_map(); |
| 3013 | reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
| 3014 | } |
| 3015 | |
| 3016 | heap()->incremental_marking()->OldSpaceStep(object_size); |
| 3017 | return object; |
| 3018 | } |
| 3019 | |
| 3020 | |
| 3021 | size_t LargeObjectSpace::CommittedPhysicalMemory() { |
| 3022 | if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 3023 | size_t size = 0; |
| 3024 | LargePage* current = first_page_; |
| 3025 | while (current != NULL) { |
| 3026 | size += current->CommittedPhysicalMemory(); |
| 3027 | current = current->next_page(); |
| 3028 | } |
| 3029 | return size; |
| 3030 | } |
| 3031 | |
| 3032 | |
| 3033 | // GC support |
| 3034 | Object* LargeObjectSpace::FindObject(Address a) { |
| 3035 | LargePage* page = FindPage(a); |
| 3036 | if (page != NULL) { |
| 3037 | return page->GetObject(); |
| 3038 | } |
| 3039 | return Smi::FromInt(0); // Signaling not found. |
| 3040 | } |
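// Note on FindObject() above: a miss is signalled with Smi::FromInt(0)
// rather than NULL, so callers can test the result with IsHeapObject() (as
// the SLOW_DCHECK in Contains() below does) instead of comparing to NULL.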
| 3041 | |
| 3042 | |
| 3043 | LargePage* LargeObjectSpace::FindPage(Address a) { |
| 3044 | uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; |
| 3045 | HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
| 3046 | static_cast<uint32_t>(key), false); |
| 3047 | if (e != NULL) { |
| 3048 | DCHECK(e->value != NULL); |
| 3049 | LargePage* page = reinterpret_cast<LargePage*>(e->value); |
| 3050 | DCHECK(page->is_valid()); |
| 3051 | if (page->Contains(a)) { |
| 3052 | return page; |
| 3053 | } |
| 3054 | } |
| 3055 | return NULL; |
| 3056 | } |
| 3057 | |
| 3058 | |
| 3059 | void LargeObjectSpace::FreeUnmarkedObjects() { |
| 3060 | LargePage* previous = NULL; |
| 3061 | LargePage* current = first_page_; |
| 3062 | while (current != NULL) { |
| 3063 | HeapObject* object = current->GetObject(); |
| 3064 |     // Can this large page contain pointers to non-trivial objects? No other
| 3065 |     // pointer object is this big.
| 3066 | bool is_pointer_object = object->IsFixedArray(); |
| 3067 | MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 3068 | if (mark_bit.Get()) { |
| 3069 | mark_bit.Clear(); |
| 3070 | Page::FromAddress(object->address())->ResetProgressBar(); |
| 3071 | Page::FromAddress(object->address())->ResetLiveBytes(); |
| 3072 | previous = current; |
| 3073 | current = current->next_page(); |
| 3074 | } else { |
| 3075 | LargePage* page = current; |
| 3076 | // Cut the chunk out from the chunk list. |
| 3077 | current = current->next_page(); |
| 3078 | if (previous == NULL) { |
| 3079 | first_page_ = current; |
| 3080 | } else { |
| 3081 | previous->set_next_page(current); |
| 3082 | } |
| 3083 | |
| 3084 | // Free the chunk. |
| 3085 | heap()->mark_compact_collector()->ReportDeleteIfNeeded(object, |
| 3086 | heap()->isolate()); |
| 3087 | size_ -= static_cast<int>(page->size()); |
| 3088 | objects_size_ -= object->Size(); |
| 3089 | page_count_--; |
| 3090 | |
| 3091 | // Remove entries belonging to this page. |
| 3092 |       // Use the variable 'alignment' to help pass the line-length check
| 3093 |       // (<= 80 characters) in tools/presubmit.py.
| 3094 | const intptr_t alignment = MemoryChunk::kAlignment; |
| 3095 | uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment; |
| 3096 | uintptr_t limit = base + (page->size() - 1) / alignment; |
| 3097 | for (uintptr_t key = base; key <= limit; key++) { |
| 3098 | chunk_map_.Remove(reinterpret_cast<void*>(key), |
| 3099 | static_cast<uint32_t>(key)); |
| 3100 | } |
| 3101 | |
| 3102 | if (is_pointer_object) { |
| 3103 | heap()->QueueMemoryChunkForFree(page); |
| 3104 | } else { |
| 3105 | heap()->isolate()->memory_allocator()->Free(page); |
| 3106 | } |
| 3107 | } |
| 3108 | } |
| 3109 | heap()->FreeQueuedChunks(); |
| 3110 | } |
| 3111 | |
| 3112 | |
| 3113 | bool LargeObjectSpace::Contains(HeapObject* object) { |
| 3114 | Address address = object->address(); |
| 3115 | MemoryChunk* chunk = MemoryChunk::FromAddress(address); |
| 3116 | |
| 3117 | bool owned = (chunk->owner() == this); |
| 3118 | |
| 3119 | SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); |
| 3120 | |
| 3121 | return owned; |
| 3122 | } |
| 3123 | |
| 3124 | |
| 3125 | #ifdef VERIFY_HEAP |
| 3126 | // We do not assume that the large object iterator works, because it depends |
| 3127 | // on the invariants we are checking during verification. |
| 3128 | void LargeObjectSpace::Verify() { |
| 3129 | for (LargePage* chunk = first_page_; chunk != NULL; |
| 3130 | chunk = chunk->next_page()) { |
| 3131 | // Each chunk contains an object that starts at the large object page's |
| 3132 | // object area start. |
| 3133 | HeapObject* object = chunk->GetObject(); |
| 3134 | Page* page = Page::FromAddress(object->address()); |
| 3135 | CHECK(object->address() == page->area_start()); |
| 3136 | |
| 3137 | // The first word should be a map, and we expect all map pointers to be |
| 3138 | // in map space. |
| 3139 | Map* map = object->map(); |
| 3140 | CHECK(map->IsMap()); |
| 3141 | CHECK(heap()->map_space()->Contains(map)); |
| 3142 | |
| 3143 | // We have only code, sequential strings, external strings |
| 3144 | // (sequential strings that have been morphed into external |
| 3145 | // strings), fixed arrays, byte arrays, and constant pool arrays in the |
| 3146 | // large object space. |
| 3147 | CHECK(object->IsCode() || object->IsSeqString() || |
| 3148 | object->IsExternalString() || object->IsFixedArray() || |
| 3149 | object->IsFixedDoubleArray() || object->IsByteArray() || |
| 3150 | object->IsConstantPoolArray()); |
| 3151 | |
| 3152 | // The object itself should look OK. |
| 3153 | object->ObjectVerify(); |
| 3154 | |
| 3155 | // Byte arrays and strings don't have interior pointers. |
| 3156 | if (object->IsCode()) { |
| 3157 | VerifyPointersVisitor code_visitor; |
| 3158 | object->IterateBody(map->instance_type(), object->Size(), &code_visitor); |
| 3159 | } else if (object->IsFixedArray()) { |
| 3160 | FixedArray* array = FixedArray::cast(object); |
| 3161 | for (int j = 0; j < array->length(); j++) { |
| 3162 | Object* element = array->get(j); |
| 3163 | if (element->IsHeapObject()) { |
| 3164 | HeapObject* element_object = HeapObject::cast(element); |
| 3165 | CHECK(heap()->Contains(element_object)); |
| 3166 | CHECK(element_object->map()->IsMap()); |
| 3167 | } |
| 3168 | } |
| 3169 | } |
| 3170 | } |
| 3171 | } |
| 3172 | #endif |
| 3173 | |
| 3174 | |
| 3175 | #ifdef DEBUG |
| 3176 | void LargeObjectSpace::Print() { |
| 3177 | OFStream os(stdout); |
| 3178 | LargeObjectIterator it(this); |
| 3179 | for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3180 | obj->Print(os); |
| 3181 | } |
| 3182 | } |
| 3183 | |
| 3184 | |
| 3185 | void LargeObjectSpace::ReportStatistics() { |
| 3186 | PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); |
| 3187 | int num_objects = 0; |
| 3188 | ClearHistograms(heap()->isolate()); |
| 3189 | LargeObjectIterator it(this); |
| 3190 | for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 3191 | num_objects++; |
| 3192 | CollectHistogramInfo(obj); |
| 3193 | } |
| 3194 | |
| 3195 | PrintF( |
| 3196 | " number of objects %d, " |
| 3197 | "size of objects %" V8_PTR_PREFIX "d\n", |
| 3198 | num_objects, objects_size_); |
| 3199 | if (num_objects > 0) ReportHistogram(heap()->isolate(), false); |
| 3200 | } |
| 3201 | |
| 3202 | |
| 3203 | void LargeObjectSpace::CollectCodeStatistics() { |
| 3204 | Isolate* isolate = heap()->isolate(); |
| 3205 | LargeObjectIterator obj_it(this); |
| 3206 | for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { |
| 3207 | if (obj->IsCode()) { |
| 3208 | Code* code = Code::cast(obj); |
| 3209 | isolate->code_kind_statistics()[code->kind()] += code->Size(); |
| 3210 | } |
| 3211 | } |
| 3212 | } |
| 3213 | |
| 3214 | |
| 3215 | void Page::Print() { |
| 3216 |   // Make a best-effort attempt to print the objects in the page.
| 3217 | PrintF("Page@%p in %s\n", this->address(), |
| 3218 | AllocationSpaceName(this->owner()->identity())); |
| 3219 | printf(" --------------------------------------\n"); |
| 3220 | HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); |
| 3221 | unsigned mark_size = 0; |
| 3222 | for (HeapObject* object = objects.Next(); object != NULL; |
| 3223 | object = objects.Next()) { |
| 3224 | bool is_marked = Marking::MarkBitFrom(object).Get(); |
| 3225 | PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. |
| 3226 | if (is_marked) { |
| 3227 | mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); |
| 3228 | } |
| 3229 | object->ShortPrint(); |
| 3230 | PrintF("\n"); |
| 3231 | } |
| 3232 | printf(" --------------------------------------\n"); |
| 3233 | printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3234 | } |
| 3235 | |
| 3236 | #endif // DEBUG |
| 3237 | } |
| 3238 | } // namespace v8::internal |