// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->WasSwept() || page->SweepingCompleted());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the current page's objects and should advance to
// the next page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
      cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
  return true;
}

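// A minimal usage sketch (assuming an iterable space; PagedSpace::FindObject
// below uses the same pattern on a single page):
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... visit obj ...
//   }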

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as the j/jal instructions, which have a
  // 28-bit encoded immediate, the addresses have to lie within a 256 MB
  // aligned region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of the requested size.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


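// Scans forward from the current allocation block for one that can fit
// |requested| bytes. If none is found, compacts the blocks by sorting them
// by address and merging adjacent ones, then rescans; returns false if the
// code range is full or too fragmented.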
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes the guard pages while commit_size does not. Make
  // sure callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}


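// Carves a block of at least |requested_size| bytes off the front of the
// current allocation block; if the leftover would be a page or less, the
// whole block is handed out instead.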
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block behind; it would be useless for a large
  // object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_.Value() == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeNewSpaceMemory(Address addr,
                                         base::VirtualMemory* reservation,
                                         Executability executable) {
  LOG(isolate_, DeleteEvent("NewSpace", addr));

  DCHECK(reservation->IsReserved());
  const intptr_t size = static_cast<intptr_t>(reservation->size());
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  FreeMemory(reservation, NOT_EXECUTABLE);
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


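// An anchor is a sentinel rather than a real page: it owns no allocated
// memory and links to itself, serving as the fixed head of a space's
// circular page list (see the HeapObjectIterator comment above).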
void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Clearing all flags marks this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}


MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
  chunk->parallel_compaction_state().SetValue(kCompactingDone);
  chunk->mutex_ = NULL;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


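// MemoryChunks form a circular doubly linked list threaded through their
// space's anchor; InsertAfter and Unlink below are the standard splice
// operations on that list.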
void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |          Header            |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

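  // As a worked example (assuming a hypothetical 4 KB OS commit page size):
  // the leading guard page of an executable chunk sits at
  // CodePageGuardStartOffset() = RoundUp(Page::kObjectStartOffset, 4 KB), and
  // the area begins at CodePageAreaStartOffset() = CodePageGuardStartOffset()
  // + 4 KB. See the CodePage* helpers further down.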
  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use the code range only for the large object space on mips64, to keep
    // the address range within a 256 MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(
      heap, base, chunk_size, area_start, area_end, executable, owner);
  result->set_reserved_memory(&reservation);
  return result;
}


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == NULL) return NULL;
  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


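// Freeing a chunk happens in two phases: PreFreeMemory unregisters the chunk
// and updates the accounting while the chunk is still intact, and
// PerformFreeMemory then actually releases the underlying memory. Free()
// simply runs both phases back to back.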
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  PreFreeMemory(chunk);
  PerformFreeMemory(chunk);
}


bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}


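// Commits an executable page in four steps: the non-executable header, the
// guard page behind it, the executable body, and the trailing guard page.
// On any failure the already committed regions are uncommitted again and
// false is returned.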
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}


void MemoryChunk::ReleaseAllocatedMemory() {
  delete slots_buffer_;
  delete skip_list_;
  delete mutex_;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

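// These assertions tie each AllocationSpace id to the corresponding
// ObjectSpace flag (1 << id) used by the memory allocation callbacks.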
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);


PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable),
      free_list_(this),
      end_of_unswept_pages_(NULL) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


void PagedSpace::AddMemory(Address start, intptr_t size) {
  accounting_stats_.ExpandSpace(static_cast<int>(size));
  Free(start, static_cast<int>(size));
}


FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
  if (free_space != nullptr) {
    accounting_stats_.DecreaseCapacity(free_space->size());
  }
  return free_space;
}


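// Hands out this space's free memory round-robin to |num| compaction space
// collections until each free list holds at least |limit| bytes or this
// space has nothing left to give; a |limit| of 0 means no per-space cap.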
void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
                                            int num, intptr_t limit) {
  DCHECK_GT(num, 0);
  DCHECK(other != nullptr);

  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();

  EmptyAllocationInfo();

  bool memory_available = true;
  bool spaces_need_memory = true;
  FreeSpace* node = nullptr;
  CompactionSpace* current_space = nullptr;
  // Iterate over spaces and memory as long as we have memory and there are
  // spaces in need of some.
  while (memory_available && spaces_need_memory) {
    spaces_need_memory = false;
    // Round-robin over all spaces.
    for (int i = 0; i < num; i++) {
      current_space = other[i]->Get(identity());
      if (current_space->free_list()->Available() < limit) {
        // Space has not reached its limit. Try to get some memory.
        spaces_need_memory = true;
        node = TryRemoveMemory(limit - current_space->free_list()->Available());
        if (node != nullptr) {
          CHECK(current_space->identity() == identity());
          current_space->AddMemory(node->address(), node->size());
        } else {
          memory_available = false;
          break;
        }
      }
    }
  }
}


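// Moves the free list that the sweeper built for this space (owned by the
// mark-compact collector) back into the space's own free list and grows the
// accounted capacity accordingly.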
void PagedSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (this == heap()->old_space()) {
    free_list = collector->free_list_old_space().get();
  } else if (this == heap()->code_space()) {
    free_list = collector->free_list_code_space().get();
  } else if (this == heap()->map_space()) {
    free_list = collector->free_list_map_space().get();
  } else {
    // Any PagedSpace might invoke RefillFreeList; filter out everything but
    // the old-generation spaces.
    return;
  }
  DCHECK(free_list != nullptr);
  intptr_t added = free_list_.Concatenate(free_list);
  accounting_stats_.IncreaseCapacity(added);
}


void CompactionSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (identity() == OLD_SPACE) {
    free_list = collector->free_list_old_space().get();
  } else if (identity() == CODE_SPACE) {
    free_list = collector->free_list_code_space().get();
  } else {
    // Compaction spaces only represent old or code space.
    UNREACHABLE();
  }
  DCHECK(free_list != nullptr);
  intptr_t refilled = 0;
  while (refilled < kCompactionMemoryWanted) {
    FreeSpace* node =
        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
    if (node == nullptr) return;
    refilled += node->size();
    AddMemory(node->address(), node->size());
  }
}


void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
  DCHECK(identity() == other->identity());
  // Destroy the linear allocation space of {other}. This is needed to
  // (a) not waste the memory and
  // (b) keep the rest of the chunk in an iterable state (filler is needed).
  other->EmptyAllocationInfo();

  // Move over the free list. Concatenate makes sure that the source free list
  // gets properly reset after moving over all nodes.
  intptr_t added = free_list_.Concatenate(other->free_list());

  // Moved memory is not recorded as allocated memory, but rather increases and
  // decreases capacity of the corresponding spaces.
  other->accounting_stats_.DecreaseCapacity(added);
  accounting_stats_.IncreaseCapacity(added);
}


void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  // Unmerged fields:
  //   area_size_
  //   anchor_

  MoveOverFreeMemory(other);

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  DCHECK(other->end_of_unswept_pages_ == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();
    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


bool PagedSpace::ContainsSafe(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand(size_t size) {
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());

  // Are we going to exceed capacity for this space? At this point we can be
  // way over the maximum size because of AlwaysAllocate scopes and large
  // objects.
  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;

  return true;
}


bool PagedSpace::Expand() {
  intptr_t size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!CanExpand(size)) return false;

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}


| 1232 | void PagedSpace::ReleasePage(Page* page) { |
| 1233 | DCHECK(page->LiveBytes() == 0); |
| 1234 | DCHECK(AreaSize() == page->area_size()); |
| 1235 | |
| 1236 | if (page->WasSwept()) { |
| 1237 | intptr_t size = free_list_.EvictFreeListItems(page); |
| 1238 | accounting_stats_.AllocateBytes(size); |
| 1239 | DCHECK_EQ(AreaSize(), static_cast<int>(size)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1240 | } |
| 1241 | |
| 1242 | if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { |
| 1243 | heap()->decrement_scan_on_scavenge_pages(); |
| 1244 | page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); |
| 1245 | } |
| 1246 | |
| 1247 | DCHECK(!free_list_.ContainsPageFreeListItems(page)); |
| 1248 | |
| 1249 | if (Page::FromAllocationTop(allocation_info_.top()) == page) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1250 | allocation_info_.Reset(nullptr, nullptr); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1251 | } |
| 1252 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1253 | // If the page is still in a list, unlink it from that list. |
| 1254 | if (page->next_chunk() != NULL) { |
| 1255 | DCHECK(page->prev_chunk() != NULL); |
| 1256 | page->Unlink(); |
| 1257 | } |
| 1258 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1259 | AccountUncommitted(static_cast<intptr_t>(page->size())); |
| 1260 | heap()->QueueMemoryChunkForFree(page); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1261 | |
| 1262 | DCHECK(Capacity() > 0); |
| 1263 | accounting_stats_.ShrinkSpace(AreaSize()); |
| 1264 | } |
| 1265 | |
| 1266 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1267 | #ifdef DEBUG |
| 1268 | void PagedSpace::Print() {} |
| 1269 | #endif |
| 1270 | |
| 1271 | #ifdef VERIFY_HEAP |
| 1272 | void PagedSpace::Verify(ObjectVisitor* visitor) { |
| 1273 | bool allocation_pointer_found_in_space = |
| 1274 | (allocation_info_.top() == allocation_info_.limit()); |
| 1275 | PageIterator page_iterator(this); |
| 1276 | while (page_iterator.has_next()) { |
| 1277 | Page* page = page_iterator.next(); |
| 1278 | CHECK(page->owner() == this); |
| 1279 | if (page == Page::FromAllocationTop(allocation_info_.top())) { |
| 1280 | allocation_pointer_found_in_space = true; |
| 1281 | } |
| 1282 | CHECK(page->WasSwept()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1283 | HeapObjectIterator it(page); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1284 | Address end_of_previous_object = page->area_start(); |
| 1285 | Address top = page->area_end(); |
| 1286 | int black_size = 0; |
| 1287 | for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { |
| 1288 | CHECK(end_of_previous_object <= object->address()); |
| 1289 | |
| 1290 | // The first word should be a map, and we expect all map pointers to |
| 1291 | // be in map space. |
| 1292 | Map* map = object->map(); |
| 1293 | CHECK(map->IsMap()); |
| 1294 | CHECK(heap()->map_space()->Contains(map)); |
| 1295 | |
| 1296 | // Perform space-specific object verification. |
| 1297 | VerifyObject(object); |
| 1298 | |
| 1299 | // The object itself should look OK. |
| 1300 | object->ObjectVerify(); |
| 1301 | |
| 1302 | // All the interior pointers should be contained in the heap. |
| 1303 | int size = object->Size(); |
| 1304 | object->IterateBody(map->instance_type(), size, visitor); |
| 1305 | if (Marking::IsBlack(Marking::MarkBitFrom(object))) { |
| 1306 | black_size += size; |
| 1307 | } |
| 1308 | |
| 1309 | CHECK(object->address() + size <= top); |
| 1310 | end_of_previous_object = object->address() + size; |
| 1311 | } |
| 1312 | CHECK_LE(black_size, page->LiveBytes()); |
| 1313 | } |
| 1314 | CHECK(allocation_pointer_found_in_space); |
| 1315 | } |
| 1316 | #endif // VERIFY_HEAP |
| 1317 | |
| 1318 | // ----------------------------------------------------------------------------- |
| 1319 | // NewSpace implementation |
| 1320 | |
| 1321 | |
| 1322 | bool NewSpace::SetUp(int reserved_semispace_capacity, |
| 1323 | int maximum_semispace_capacity) { |
| 1324 | // Set up new space based on the preallocated memory block defined by |
| 1325 | // start and size. The provided space is divided into two semi-spaces. |
| 1326 | // To support fast containment testing in the new space, the size of |
| 1327 | // this chunk must be a power of two and it must be aligned to its size. |
| 1328 | int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); |
| 1329 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1330 | int target_semispace_capacity = heap()->TargetSemiSpaceSize(); |
| 1331 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1332 | size_t size = 2 * reserved_semispace_capacity; |
| 1333 | Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory( |
| 1334 | size, size, &reservation_); |
| 1335 | if (base == NULL) return false; |
| 1336 | |
| 1337 | chunk_base_ = base; |
| 1338 | chunk_size_ = static_cast<uintptr_t>(size); |
| 1339 | LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); |
| 1340 | |
| 1341 | DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); |
| 1342 | DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity)); |
| 1343 | |
| 1344 | // Allocate and set up the histogram arrays if necessary. |
| 1345 | allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1346 | promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); |
| 1347 | |
| 1348 | #define SET_NAME(name) \ |
| 1349 | allocated_histogram_[name].set_name(#name); \ |
| 1350 | promoted_histogram_[name].set_name(#name); |
| 1351 | INSTANCE_TYPE_LIST(SET_NAME) |
| 1352 | #undef SET_NAME |
| 1353 | |
| 1354 | DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); |
| 1355 | DCHECK(static_cast<intptr_t>(chunk_size_) >= |
| 1356 | 2 * heap()->ReservedSemiSpaceSize()); |
| 1357 | DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); |
| 1358 | |
| 1359 | to_space_.SetUp(chunk_base_, initial_semispace_capacity, |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1360 | target_semispace_capacity, maximum_semispace_capacity); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1361 | from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1362 | initial_semispace_capacity, target_semispace_capacity, |
| 1363 | maximum_semispace_capacity); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1364 | if (!to_space_.Commit()) { |
| 1365 | return false; |
| 1366 | } |
| 1367 | DCHECK(!from_space_.is_committed()); // No need to use memory yet. |
| 1368 | |
| 1369 | start_ = chunk_base_; |
| 1370 | address_mask_ = ~(2 * reserved_semispace_capacity - 1); |
| 1371 | object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1372 | object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; |
| 1373 | |
| 1374 | ResetAllocationInfo(); |
| 1375 | |
| 1376 | return true; |
| 1377 | } |
| 1378 | |
| 1379 | |
| 1380 | void NewSpace::TearDown() { |
| 1381 | if (allocated_histogram_) { |
| 1382 | DeleteArray(allocated_histogram_); |
| 1383 | allocated_histogram_ = NULL; |
| 1384 | } |
| 1385 | if (promoted_histogram_) { |
| 1386 | DeleteArray(promoted_histogram_); |
| 1387 | promoted_histogram_ = NULL; |
| 1388 | } |
| 1389 | |
| 1390 | start_ = NULL; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1391 | allocation_info_.Reset(nullptr, nullptr); |
| 1392 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1393 | |
| 1394 | to_space_.TearDown(); |
| 1395 | from_space_.TearDown(); |
| 1396 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1397 | heap()->isolate()->memory_allocator()->FreeNewSpaceMemory( |
| 1398 | chunk_base_, &reservation_, NOT_EXECUTABLE); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1399 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1400 | chunk_base_ = NULL; |
| 1401 | chunk_size_ = 0; |
| 1402 | } |
| 1403 | |
| 1404 | |
| 1405 | void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); } |
| 1406 | |
| 1407 | |
| 1408 | void NewSpace::Grow() { |
| 1409 | // Double the semispace size but only up to maximum capacity. |
| 1410 | DCHECK(TotalCapacity() < MaximumCapacity()); |
| 1411 | int new_capacity = |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1412 | Min(MaximumCapacity(), |
| 1413 | FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity())); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1414 | if (to_space_.GrowTo(new_capacity)) { |
| 1415 | // Only grow from-space if we managed to grow to-space. |
| 1416 | if (!from_space_.GrowTo(new_capacity)) { |
| 1417 | // If we managed to grow to-space but couldn't grow from-space, |
| 1418 | // attempt to shrink to-space. |
| 1419 | if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) { |
| 1420 | // We are in an inconsistent state because we could not |
| 1421 | // commit/uncommit memory from new space. |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1422 | CHECK(false); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1423 | } |
| 1424 | } |
| 1425 | } |
| 1426 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1427 | } |
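| | |
| | // Worked example (comment only; assumes the default growth factor of 2): |
| | // with TotalCapacity() == 1 MB and MaximumCapacity() == 8 MB, new_capacity |
| | // becomes Min(8 MB, 2 * 1 MB) == 2 MB, so both semispaces grow to 2 MB. |
| | // The DCHECK above requires TotalCapacity() < MaximumCapacity(), so Grow() |
| | // must not be called again once the maximum has been reached. |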
| 1428 | |
| 1429 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1430 | bool NewSpace::GrowOnePage() { |
| 1431 | if (TotalCapacity() == MaximumCapacity()) return false; |
| 1432 | int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize; |
| 1433 | if (to_space_.GrowTo(new_capacity)) { |
| 1434 | // Only grow from-space if we managed to grow to-space and from-space |
| 1435 | // is actually committed. |
| 1436 | if (from_space_.is_committed()) { |
| 1437 | if (!from_space_.GrowTo(new_capacity)) { |
| 1438 | // If we managed to grow to-space but couldn't grow from-space, |
| 1439 | // attempt to shrink to-space. |
| 1440 | if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) { |
| 1441 | // We are in an inconsistent state because we could not |
| 1442 | // commit/uncommit memory from new space. |
| 1443 | CHECK(false); |
| 1444 | } |
| 1445 | return false; |
| 1446 | } |
| 1447 | } else { |
| 1448 | if (!from_space_.SetTotalCapacity(new_capacity)) { |
| 1449 | // Can't really happen, but better safe than sorry. |
| 1450 | CHECK(false); |
| 1451 | } |
| 1452 | } |
| 1453 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1454 | return true; |
| 1455 | } |
| 1456 | return false; |
| 1457 | } |
| 1458 | |
| 1459 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1460 | void NewSpace::Shrink() { |
| 1461 | int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt()); |
| 1462 | int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); |
| 1463 | if (rounded_new_capacity < TotalCapacity() && |
| 1464 | to_space_.ShrinkTo(rounded_new_capacity)) { |
| 1465 | // Only shrink from-space if we managed to shrink to-space. |
| 1466 | from_space_.Reset(); |
| 1467 | if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1468 | // If we managed to shrink to-space but couldn't shrink from-space, |
| 1469 | // attempt to grow to-space again. |
| 1470 | if (!to_space_.GrowTo(from_space_.TotalCapacity())) { |
| 1471 | // We are in an inconsistent state because we could not |
| 1472 | // commit/uncommit memory from new space. |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1473 | CHECK(false); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1474 | } |
| 1475 | } |
| 1476 | } |
| 1477 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1478 | } |
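| | |
| | // Worked example (comment only; sizes hypothetical): with |
| | // InitialTotalCapacity() == 1 MB and SizeAsInt() == 300 KB, new_capacity is |
| | // Max(1 MB, 600 KB) == 1 MB, rounded up to a multiple of Page::kPageSize. |
| | // The semispaces shrink only if that rounded value is still below the |
| | // current TotalCapacity(). |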
| 1479 | |
| 1480 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1481 | void LocalAllocationBuffer::Close() { |
| 1482 | if (IsValid()) { |
| 1483 | heap_->CreateFillerObjectAt( |
| 1484 | allocation_info_.top(), |
| 1485 | static_cast<int>(allocation_info_.limit() - allocation_info_.top())); |
| 1486 | } |
| 1487 | } |
| 1488 | |
| 1489 | |
| 1490 | LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap, |
| 1491 | AllocationInfo allocation_info) |
| 1492 | : heap_(heap), allocation_info_(allocation_info) { |
| 1493 | if (IsValid()) { |
| 1494 | heap_->CreateFillerObjectAt( |
| 1495 | allocation_info_.top(), |
| 1496 | static_cast<int>(allocation_info_.limit() - allocation_info_.top())); |
| 1497 | } |
| 1498 | } |
| 1499 | |
| 1500 | |
| 1501 | LocalAllocationBuffer::LocalAllocationBuffer( |
| 1502 | const LocalAllocationBuffer& other) { |
| 1503 | *this = other; |
| 1504 | } |
| 1505 | |
| 1506 | |
| 1507 | LocalAllocationBuffer& LocalAllocationBuffer::operator=( |
| 1508 | const LocalAllocationBuffer& other) { |
| 1509 | Close(); |
| 1510 | heap_ = other.heap_; |
| 1511 | allocation_info_ = other.allocation_info_; |
| 1512 | |
| 1513 | // This is needed since we (a) cannot yet use move-semantics, (b) want to |
| 1514 | // make the class easy to use by value, and (c) implicitly call {Close} |
| 1515 | // upon copy. |
| 1516 | const_cast<LocalAllocationBuffer&>(other) |
| 1517 | .allocation_info_.Reset(nullptr, nullptr); |
| 1518 | return *this; |
| 1519 | } |
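| | |
| | // Usage sketch (comment only): copy assignment transfers ownership, so at |
| | // most one LocalAllocationBuffer refers to a given memory range: |
| | // |
| | //   LocalAllocationBuffer a = ...;  // some valid buffer |
| | //   LocalAllocationBuffer b = a;    // b owns the range now; a is reset to |
| | //                                   // {nullptr, nullptr}, so a.IsValid() |
| | //                                   // is false and only b gets closed. |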
| 1520 | |
| 1521 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1522 | void NewSpace::UpdateAllocationInfo() { |
| 1523 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1524 | allocation_info_.Reset(to_space_.page_low(), to_space_.page_high()); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1525 | UpdateInlineAllocationLimit(0); |
| 1526 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1527 | } |
| 1528 | |
| 1529 | |
| 1530 | void NewSpace::ResetAllocationInfo() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1531 | Address old_top = allocation_info_.top(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1532 | to_space_.Reset(); |
| 1533 | UpdateAllocationInfo(); |
| 1534 | pages_used_ = 0; |
| 1535 | // Clear all mark-bits in the to-space. |
| 1536 | NewSpacePageIterator it(&to_space_); |
| 1537 | while (it.has_next()) { |
| 1538 | Bitmap::Clear(it.next()); |
| 1539 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1540 | InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1541 | } |
| 1542 | |
| 1543 | |
| 1544 | void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { |
| 1545 | if (heap()->inline_allocation_disabled()) { |
| 1546 | // Lowest limit when linear allocation was disabled. |
| 1547 | Address high = to_space_.page_high(); |
| 1548 | Address new_top = allocation_info_.top() + size_in_bytes; |
| 1549 | allocation_info_.set_limit(Min(new_top, high)); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1550 | } else if (inline_allocation_observers_paused_ || |
| 1551 | top_on_previous_step_ == 0) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1552 | // Normal limit is the end of the current page. |
| 1553 | allocation_info_.set_limit(to_space_.page_high()); |
| 1554 | } else { |
| 1555 | // Lower limit during incremental marking. |
| 1556 | Address high = to_space_.page_high(); |
| 1557 | Address new_top = allocation_info_.top() + size_in_bytes; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1558 | Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1559 | allocation_info_.set_limit(Min(new_limit, high)); |
| 1560 | } |
| 1561 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1562 | } |
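| | |
| | // Summary of the three regimes above (comment only): with top == T, |
| | // page_high() == H, and size_in_bytes == S, |
| | //   inline allocation disabled:   limit = Min(T + S, H) |
| | //   observers paused or unused:   limit = H |
| | //   observers active:             limit = Min(T + S + step - 1, H) |
| | // where step is GetNextInlineAllocationStepSize(). |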
| 1563 | |
| 1564 | |
| 1565 | bool NewSpace::AddFreshPage() { |
| 1566 | Address top = allocation_info_.top(); |
| 1567 | if (NewSpacePage::IsAtStart(top)) { |
| 1568 | // The current page is already empty. Don't try to make another. |
| 1569 | |
| 1570 | // We should only get here if someone asks to allocate more |
| 1571 | // than what can be stored in a single page. |
| 1572 | // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1573 | // from happening (all such allocations should go directly to LOSpace). |
| 1574 | return false; |
| 1575 | } |
| 1576 | if (!to_space_.AdvancePage()) { |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1577 | // Check whether we have reached the target capacity yet. If not, commit a page |
| 1578 | // and continue. |
| 1579 | if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) && |
| 1580 | GrowOnePage()) { |
| 1581 | if (!to_space_.AdvancePage()) { |
| 1582 | // It doesn't make sense that we managed to commit a page, but can't use |
| 1583 | // it. |
| 1584 | CHECK(false); |
| 1585 | } |
| 1586 | } else { |
| 1587 | // Failed to get a new page in to-space. |
| 1588 | return false; |
| 1589 | } |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1590 | } |
| 1591 | |
| 1592 | // Clear remainder of current page. |
| 1593 | Address limit = NewSpacePage::FromLimit(top)->area_end(); |
| 1594 | if (heap()->gc_state() == Heap::SCAVENGE) { |
| 1595 | heap()->promotion_queue()->SetNewLimit(limit); |
| 1596 | } |
| 1597 | |
| 1598 | int remaining_in_page = static_cast<int>(limit - top); |
| 1599 | heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1600 | pages_used_++; |
| 1601 | UpdateAllocationInfo(); |
| 1602 | |
| 1603 | return true; |
| 1604 | } |
| 1605 | |
| 1606 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1607 | bool NewSpace::AddFreshPageSynchronized() { |
| 1608 | base::LockGuard<base::Mutex> guard(&mutex_); |
| 1609 | return AddFreshPage(); |
| 1610 | } |
| 1611 | |
| 1612 | |
| 1613 | bool NewSpace::EnsureAllocation(int size_in_bytes, |
| 1614 | AllocationAlignment alignment) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1615 | Address old_top = allocation_info_.top(); |
| 1616 | Address high = to_space_.page_high(); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1617 | int filler_size = Heap::GetFillToAlign(old_top, alignment); |
| 1618 | int aligned_size_in_bytes = size_in_bytes + filler_size; |
| 1619 | |
| 1620 | if (old_top + aligned_size_in_bytes >= high) { |
| 1621 | // Not enough room in the page, try to allocate a new one. |
| 1622 | if (!AddFreshPage()) { |
| 1623 | return false; |
| 1624 | } |
| 1625 | |
| 1626 | InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0); |
| 1627 | |
| 1628 | old_top = allocation_info_.top(); |
| 1629 | high = to_space_.page_high(); |
| 1630 | filler_size = Heap::GetFillToAlign(old_top, alignment); |
| 1631 | aligned_size_in_bytes = size_in_bytes + filler_size; |
| 1632 | } |
| 1633 | |
| 1634 | DCHECK(old_top + aligned_size_in_bytes < high); |
| 1635 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1636 | if (allocation_info_.limit() < high) { |
| 1637 | // The limit has been lowered because linear allocation was disabled, |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1638 | // because incremental marking wants a chance to do a step, or because the |
| 1639 | // idle scavenge job wants a chance to post a task. |
| 1640 | // Set the new limit accordingly. |
| 1641 | Address new_top = old_top + aligned_size_in_bytes; |
| 1642 | Address soon_object = old_top + filler_size; |
| 1643 | InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes); |
| 1644 | UpdateInlineAllocationLimit(aligned_size_in_bytes); |
| 1645 | } |
| 1646 | return true; |
| 1647 | } |
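| | |
| | // Worked example (comment only; assumes a 32-bit build where doubles need |
| | // 8-byte alignment): requesting size_in_bytes == 24 with kDoubleAligned on |
| | // a top address that is 4 modulo 8 gives filler_size == 4, so 28 bytes must |
| | // fit below `high`; if they do not, a fresh page is taken and the filler is |
| | // recomputed against the new top. |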
| 1648 | |
| 1649 | |
| 1650 | void NewSpace::StartNextInlineAllocationStep() { |
| 1651 | if (!inline_allocation_observers_paused_) { |
| 1652 | top_on_previous_step_ = |
| 1653 | inline_allocation_observers_.length() ? allocation_info_.top() : 0; |
| 1654 | UpdateInlineAllocationLimit(0); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1655 | } |
| 1656 | } |
| 1657 | |
| 1658 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1659 | intptr_t NewSpace::GetNextInlineAllocationStepSize() { |
| 1660 | intptr_t next_step = 0; |
| 1661 | for (int i = 0; i < inline_allocation_observers_.length(); ++i) { |
| 1662 | InlineAllocationObserver* o = inline_allocation_observers_[i]; |
| 1663 | next_step = next_step ? Min(next_step, o->bytes_to_next_step()) |
| 1664 | : o->bytes_to_next_step(); |
| 1665 | } |
| 1666 | DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0); |
| 1667 | return next_step; |
| 1668 | } |
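| | |
| | // Worked example (comment only): with two registered observers whose |
| | // bytes_to_next_step() values are 128 KB and 32 KB, the loop returns |
| | // Min(128 KB, 32 KB) == 32 KB, so the lowered allocation limit cannot |
| | // overshoot the nearer observer. With no observers the result is 0. |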
| 1669 | |
| 1670 | |
| 1671 | void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) { |
| 1672 | inline_allocation_observers_.Add(observer); |
| 1673 | StartNextInlineAllocationStep(); |
| 1674 | } |
| 1675 | |
| 1676 | |
| 1677 | void NewSpace::RemoveInlineAllocationObserver( |
| 1678 | InlineAllocationObserver* observer) { |
| 1679 | bool removed = inline_allocation_observers_.RemoveElement(observer); |
| 1680 | // Only used in assertion. Suppress unused variable warning. |
| 1681 | static_cast<void>(removed); |
| 1682 | DCHECK(removed); |
| 1683 | StartNextInlineAllocationStep(); |
| 1684 | } |
| 1685 | |
| 1686 | |
| 1687 | void NewSpace::PauseInlineAllocationObservers() { |
| 1688 | // Do a step to account for memory allocated so far. |
| 1689 | InlineAllocationStep(top(), top(), nullptr, 0); |
| 1690 | inline_allocation_observers_paused_ = true; |
| 1691 | top_on_previous_step_ = 0; |
| 1692 | UpdateInlineAllocationLimit(0); |
| 1693 | } |
| 1694 | |
| 1695 | |
| 1696 | void NewSpace::ResumeInlineAllocationObservers() { |
| 1697 | DCHECK(top_on_previous_step_ == 0); |
| 1698 | inline_allocation_observers_paused_ = false; |
| 1699 | StartNextInlineAllocationStep(); |
| 1700 | } |
| 1701 | |
| 1702 | |
| 1703 | void NewSpace::InlineAllocationStep(Address top, Address new_top, |
| 1704 | Address soon_object, size_t size) { |
| 1705 | if (top_on_previous_step_) { |
| 1706 | int bytes_allocated = static_cast<int>(top - top_on_previous_step_); |
| 1707 | for (int i = 0; i < inline_allocation_observers_.length(); ++i) { |
| 1708 | inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated, |
| 1709 | soon_object, size); |
| 1710 | } |
| 1711 | top_on_previous_step_ = new_top; |
| 1712 | } |
| 1713 | } |
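| | |
| | // Minimal observer sketch (comment only; assumes the virtual Step() hook |
| | // that spaces.h declares for InlineAllocationObserver): |
| | // |
| | //   class LoggingObserver : public InlineAllocationObserver { |
| | //    public: |
| | //     LoggingObserver() : InlineAllocationObserver(64 * KB) {} |
| | //     void Step(int bytes_allocated, Address soon_object, size_t size) |
| | //         override { |
| | //       PrintF("new space: %d bytes since last step\n", bytes_allocated); |
| | //     } |
| | //   }; |
| | // |
| | // Once registered via AddInlineAllocationObserver(), it is stepped roughly |
| | // every 64 KB of new-space allocation. |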
| 1714 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1715 | #ifdef VERIFY_HEAP |
| 1716 | // We do not use the SemiSpaceIterator because verification doesn't assume |
| 1717 | // that it works (it depends on the invariants we are checking). |
| 1718 | void NewSpace::Verify() { |
| 1719 | // The allocation pointer should be in the space or at the very end. |
| 1720 | DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1721 | |
| 1722 | // There should be objects packed in from the low address up to the |
| 1723 | // allocation pointer. |
| 1724 | Address current = to_space_.first_page()->area_start(); |
| 1725 | CHECK_EQ(current, to_space_.space_start()); |
| 1726 | |
| 1727 | while (current != top()) { |
| 1728 | if (!NewSpacePage::IsAtEnd(current)) { |
| 1729 | // The allocation pointer should not be in the middle of an object. |
| 1730 | CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || |
| 1731 | current < top()); |
| 1732 | |
| 1733 | HeapObject* object = HeapObject::FromAddress(current); |
| 1734 | |
| 1735 | // The first word should be a map, and we expect all map pointers to |
| 1736 | // be in map space. |
| 1737 | Map* map = object->map(); |
| 1738 | CHECK(map->IsMap()); |
| 1739 | CHECK(heap()->map_space()->Contains(map)); |
| 1740 | |
| 1741 | // The object should not be code or a map. |
| 1742 | CHECK(!object->IsMap()); |
| 1743 | CHECK(!object->IsCode()); |
| 1744 | |
| 1745 | // The object itself should look OK. |
| 1746 | object->ObjectVerify(); |
| 1747 | |
| 1748 | // All the interior pointers should be contained in the heap. |
| 1749 | VerifyPointersVisitor visitor; |
| 1750 | int size = object->Size(); |
| 1751 | object->IterateBody(map->instance_type(), size, &visitor); |
| 1752 | |
| 1753 | current += size; |
| 1754 | } else { |
| 1755 | // At end of page, switch to next page. |
| 1756 | NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); |
| 1757 | // Next page should be valid. |
| 1758 | CHECK(!page->is_anchor()); |
| 1759 | current = page->area_start(); |
| 1760 | } |
| 1761 | } |
| 1762 | |
| 1763 | // Check semi-spaces. |
| 1764 | CHECK_EQ(from_space_.id(), kFromSpace); |
| 1765 | CHECK_EQ(to_space_.id(), kToSpace); |
| 1766 | from_space_.Verify(); |
| 1767 | to_space_.Verify(); |
| 1768 | } |
| 1769 | #endif |
| 1770 | |
| 1771 | // ----------------------------------------------------------------------------- |
| 1772 | // SemiSpace implementation |
| 1773 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1774 | void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity, |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1775 | int maximum_capacity) { |
| 1776 | // Creates a space in the young generation. The constructor does not |
| 1777 | // allocate memory from the OS. A SemiSpace is given a contiguous chunk of |
| 1778 | // memory of size 'capacity' when set up, and does not grow or shrink |
| 1779 | // otherwise. In the mark-compact collector, the memory region of the from |
| 1780 | // space is used as the marking stack. It requires contiguous memory |
| 1781 | // addresses. |
| 1782 | DCHECK(maximum_capacity >= Page::kPageSize); |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1783 | DCHECK(initial_capacity <= target_capacity); |
| 1784 | DCHECK(target_capacity <= maximum_capacity); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1785 | initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize); |
| 1786 | total_capacity_ = initial_capacity; |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1787 | target_capacity_ = RoundDown(target_capacity, Page::kPageSize); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1788 | maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1789 | committed_ = false; |
| 1790 | start_ = start; |
| 1791 | address_mask_ = ~(maximum_capacity - 1); |
| 1792 | object_mask_ = address_mask_ | kHeapObjectTagMask; |
| 1793 | object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1794 | age_mark_ = start_ + NewSpacePage::kObjectStartOffset; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1795 | } |
| 1796 | |
| 1797 | |
| 1798 | void SemiSpace::TearDown() { |
| 1799 | start_ = NULL; |
| 1800 | total_capacity_ = 0; |
| 1801 | } |
| 1802 | |
| 1803 | |
| 1804 | bool SemiSpace::Commit() { |
| 1805 | DCHECK(!is_committed()); |
| 1806 | int pages = total_capacity_ / Page::kPageSize; |
| 1807 | if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1808 | start_, total_capacity_, executable())) { |
| 1809 | return false; |
| 1810 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1811 | AccountCommitted(total_capacity_); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1812 | |
| 1813 | NewSpacePage* current = anchor(); |
| 1814 | for (int i = 0; i < pages; i++) { |
| 1815 | NewSpacePage* new_page = |
| 1816 | NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); |
| 1817 | new_page->InsertAfter(current); |
| 1818 | current = new_page; |
| 1819 | } |
| 1820 | |
| 1821 | SetCapacity(total_capacity_); |
| 1822 | committed_ = true; |
| 1823 | Reset(); |
| 1824 | return true; |
| 1825 | } |
| 1826 | |
| 1827 | |
| 1828 | bool SemiSpace::Uncommit() { |
| 1829 | DCHECK(is_committed()); |
| 1830 | Address start = start_ + maximum_total_capacity_ - total_capacity_; |
| 1831 | if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, |
| 1832 | total_capacity_)) { |
| 1833 | return false; |
| 1834 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1835 | AccountUncommitted(total_capacity_); |
| 1836 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1837 | anchor()->set_next_page(anchor()); |
| 1838 | anchor()->set_prev_page(anchor()); |
| 1839 | |
| 1840 | committed_ = false; |
| 1841 | return true; |
| 1842 | } |
| 1843 | |
| 1844 | |
| 1845 | size_t SemiSpace::CommittedPhysicalMemory() { |
| 1846 | if (!is_committed()) return 0; |
| 1847 | size_t size = 0; |
| 1848 | NewSpacePageIterator it(this); |
| 1849 | while (it.has_next()) { |
| 1850 | size += it.next()->CommittedPhysicalMemory(); |
| 1851 | } |
| 1852 | return size; |
| 1853 | } |
| 1854 | |
| 1855 | |
| 1856 | bool SemiSpace::GrowTo(int new_capacity) { |
| 1857 | if (!is_committed()) { |
| 1858 | if (!Commit()) return false; |
| 1859 | } |
| 1860 | DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1861 | DCHECK(new_capacity <= maximum_total_capacity_); |
| 1862 | DCHECK(new_capacity > total_capacity_); |
| 1863 | int pages_before = total_capacity_ / Page::kPageSize; |
| 1864 | int pages_after = new_capacity / Page::kPageSize; |
| 1865 | |
| 1866 | size_t delta = new_capacity - total_capacity_; |
| 1867 | |
| 1868 | DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1869 | if (!heap()->isolate()->memory_allocator()->CommitBlock( |
| 1870 | start_ + total_capacity_, delta, executable())) { |
| 1871 | return false; |
| 1872 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1873 | AccountCommitted(static_cast<intptr_t>(delta)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1874 | SetCapacity(new_capacity); |
| 1875 | NewSpacePage* last_page = anchor()->prev_page(); |
| 1876 | DCHECK(last_page != anchor()); |
| 1877 | for (int i = pages_before; i < pages_after; i++) { |
| 1878 | Address page_address = start_ + i * Page::kPageSize; |
| 1879 | NewSpacePage* new_page = |
| 1880 | NewSpacePage::Initialize(heap(), page_address, this); |
| 1881 | new_page->InsertAfter(last_page); |
| 1882 | Bitmap::Clear(new_page); |
| 1883 | // Duplicate the flags that were set on the old page. |
| 1884 | new_page->SetFlags(last_page->GetFlags(), |
| 1885 | NewSpacePage::kCopyOnFlipFlagsMask); |
| 1886 | last_page = new_page; |
| 1887 | } |
| 1888 | return true; |
| 1889 | } |
| 1890 | |
| 1891 | |
| 1892 | bool SemiSpace::ShrinkTo(int new_capacity) { |
| 1893 | DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); |
| 1894 | DCHECK(new_capacity >= initial_total_capacity_); |
| 1895 | DCHECK(new_capacity < total_capacity_); |
| 1896 | if (is_committed()) { |
| 1897 | size_t delta = total_capacity_ - new_capacity; |
| 1898 | DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); |
| 1899 | |
| 1900 | MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); |
| 1901 | if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { |
| 1902 | return false; |
| 1903 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 1904 | AccountUncommitted(static_cast<intptr_t>(delta)); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1905 | |
| 1906 | int pages_after = new_capacity / Page::kPageSize; |
| 1907 | NewSpacePage* new_last_page = |
| 1908 | NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); |
| 1909 | new_last_page->set_next_page(anchor()); |
| 1910 | anchor()->set_prev_page(new_last_page); |
| 1911 | DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); |
| 1912 | } |
| 1913 | |
| 1914 | SetCapacity(new_capacity); |
| 1915 | |
| 1916 | return true; |
| 1917 | } |
| 1918 | |
| 1919 | |
Emily Bernier | d0a1eb7 | 2015-03-24 16:35:39 -0400 | [diff] [blame] | 1920 | bool SemiSpace::SetTotalCapacity(int new_capacity) { |
| 1921 | CHECK(!is_committed()); |
| 1922 | if (new_capacity >= initial_total_capacity_ && |
| 1923 | new_capacity <= maximum_total_capacity_) { |
| 1924 | total_capacity_ = new_capacity; |
| 1925 | return true; |
| 1926 | } |
| 1927 | return false; |
| 1928 | } |
| 1929 | |
| 1930 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1931 | void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { |
| 1932 | anchor_.set_owner(this); |
| 1933 | // Fix up back-pointers to the anchor, whose address changes when |
| 1934 | // we swap. |
| 1935 | anchor_.prev_page()->set_next_page(&anchor_); |
| 1936 | anchor_.next_page()->set_prev_page(&anchor_); |
| 1937 | |
| 1938 | bool becomes_to_space = (id_ == kFromSpace); |
| 1939 | id_ = becomes_to_space ? kToSpace : kFromSpace; |
| 1940 | NewSpacePage* page = anchor_.next_page(); |
| 1941 | while (page != &anchor_) { |
| 1942 | page->set_owner(this); |
| 1943 | page->SetFlags(flags, mask); |
| 1944 | if (becomes_to_space) { |
| 1945 | page->ClearFlag(MemoryChunk::IN_FROM_SPACE); |
| 1946 | page->SetFlag(MemoryChunk::IN_TO_SPACE); |
| 1947 | page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 1948 | page->ResetLiveBytes(); |
| 1949 | } else { |
| 1950 | page->SetFlag(MemoryChunk::IN_FROM_SPACE); |
| 1951 | page->ClearFlag(MemoryChunk::IN_TO_SPACE); |
| 1952 | } |
| 1953 | DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 1954 | DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || |
| 1955 | page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); |
| 1956 | page = page->next_page(); |
| 1957 | } |
| 1958 | } |
| 1959 | |
| 1960 | |
| 1961 | void SemiSpace::Reset() { |
| 1962 | DCHECK(anchor_.next_page() != &anchor_); |
| 1963 | current_page_ = anchor_.next_page(); |
| 1964 | } |
| 1965 | |
| 1966 | |
| 1967 | void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { |
| 1968 | // We won't be swapping semispaces without data in them. |
| 1969 | DCHECK(from->anchor_.next_page() != &from->anchor_); |
| 1970 | DCHECK(to->anchor_.next_page() != &to->anchor_); |
| 1971 | |
| 1972 | // Swap bits. |
| 1973 | SemiSpace tmp = *from; |
| 1974 | *from = *to; |
| 1975 | *to = tmp; |
| 1976 | |
| 1977 | // Fix up back-pointers to the page list anchor now that its address |
| 1978 | // has changed. |
| 1979 | // Swap to/from-space bits on pages. |
| 1980 | // Copy GC flags from old active space (from-space) to new (to-space). |
| 1981 | intptr_t flags = from->current_page()->GetFlags(); |
| 1982 | to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); |
| 1983 | |
| 1984 | from->FlipPages(0, 0); |
| 1985 | } |
| 1986 | |
| 1987 | |
| 1988 | void SemiSpace::SetCapacity(int new_capacity) { |
| 1989 | total_capacity_ = new_capacity; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 1990 | } |
| 1991 | |
| 1992 | |
| 1993 | void SemiSpace::set_age_mark(Address mark) { |
| 1994 | DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this); |
| 1995 | age_mark_ = mark; |
| 1996 | // Mark all pages up to the one containing mark. |
| 1997 | NewSpacePageIterator it(space_start(), mark); |
| 1998 | while (it.has_next()) { |
| 1999 | it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); |
| 2000 | } |
| 2001 | } |
| 2002 | |
| 2003 | |
| 2004 | #ifdef DEBUG |
| 2005 | void SemiSpace::Print() {} |
| 2006 | #endif |
| 2007 | |
| 2008 | #ifdef VERIFY_HEAP |
| 2009 | void SemiSpace::Verify() { |
| 2010 | bool is_from_space = (id_ == kFromSpace); |
| 2011 | NewSpacePage* page = anchor_.next_page(); |
| 2012 | CHECK(anchor_.semi_space() == this); |
| 2013 | while (page != &anchor_) { |
| 2014 | CHECK(page->semi_space() == this); |
| 2015 | CHECK(page->InNewSpace()); |
| 2016 | CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE |
| 2017 | : MemoryChunk::IN_TO_SPACE)); |
| 2018 | CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE |
| 2019 | : MemoryChunk::IN_FROM_SPACE)); |
| 2020 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); |
| 2021 | if (!is_from_space) { |
| 2022 | // The pointers-from-here-are-interesting flag isn't updated dynamically |
| 2023 | // on from-space pages, so it might be out of sync with the marking state. |
| 2024 | if (page->heap()->incremental_marking()->IsMarking()) { |
| 2025 | CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 2026 | } else { |
| 2027 | CHECK( |
| 2028 | !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); |
| 2029 | } |
| 2030 | // TODO(gc): Check that the live_bytes_count_ field matches the |
| 2031 | // black marking on the page (if we make it match in new-space). |
| 2032 | } |
| 2033 | CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); |
| 2034 | CHECK(page->prev_page()->next_page() == page); |
| 2035 | page = page->next_page(); |
| 2036 | } |
| 2037 | } |
| 2038 | #endif |
| 2039 | |
| 2040 | #ifdef DEBUG |
| 2041 | void SemiSpace::AssertValidRange(Address start, Address end) { |
| 2042 | // Addresses must belong to the same semi-space. |
| 2043 | NewSpacePage* page = NewSpacePage::FromLimit(start); |
| 2044 | NewSpacePage* end_page = NewSpacePage::FromLimit(end); |
| 2045 | SemiSpace* space = page->semi_space(); |
| 2046 | CHECK_EQ(space, end_page->semi_space()); |
| 2047 | // The start address must not be above the end address: either both are on |
| 2048 | // the same page, or the end address is on a later page in the linked |
| 2049 | // list of semi-space pages. |
| 2050 | if (page == end_page) { |
| 2051 | CHECK(start <= end); |
| 2052 | } else { |
| 2053 | while (page != end_page) { |
| 2054 | page = page->next_page(); |
| 2055 | CHECK_NE(page, space->anchor()); |
| 2056 | } |
| 2057 | } |
| 2058 | } |
| 2059 | #endif |
| 2060 | |
| 2061 | |
| 2062 | // ----------------------------------------------------------------------------- |
| 2063 | // SemiSpaceIterator implementation. |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2064 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2065 | SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2066 | Initialize(space->bottom(), space->top()); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2067 | } |
| 2068 | |
| 2069 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2070 | void SemiSpaceIterator::Initialize(Address start, Address end) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2071 | SemiSpace::AssertValidRange(start, end); |
| 2072 | current_ = start; |
| 2073 | limit_ = end; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2074 | } |
| 2075 | |
| 2076 | |
| 2077 | #ifdef DEBUG |
| 2078 | // heap_histograms is shared; always clear it before using it. |
| 2079 | static void ClearHistograms(Isolate* isolate) { |
| 2080 | // We reset the name each time, though it hasn't changed. |
| 2081 | #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); |
| 2082 | INSTANCE_TYPE_LIST(DEF_TYPE_NAME) |
| 2083 | #undef DEF_TYPE_NAME |
| 2084 | |
| 2085 | #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); |
| 2086 | INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) |
| 2087 | #undef CLEAR_HISTOGRAM |
| 2088 | |
| 2089 | isolate->js_spill_information()->Clear(); |
| 2090 | } |
| 2091 | |
| 2092 | |
| 2093 | static void ClearCodeKindStatistics(int* code_kind_statistics) { |
| 2094 | for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 2095 | code_kind_statistics[i] = 0; |
| 2096 | } |
| 2097 | } |
| 2098 | |
| 2099 | |
| 2100 | static void ReportCodeKindStatistics(int* code_kind_statistics) { |
| 2101 | PrintF("\n Code kind histograms: \n"); |
| 2102 | for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { |
| 2103 | if (code_kind_statistics[i] > 0) { |
| 2104 | PrintF(" %-20s: %10d bytes\n", |
| 2105 | Code::Kind2String(static_cast<Code::Kind>(i)), |
| 2106 | code_kind_statistics[i]); |
| 2107 | } |
| 2108 | } |
| 2109 | PrintF("\n"); |
| 2110 | } |
| 2111 | |
| 2112 | |
| 2113 | static int CollectHistogramInfo(HeapObject* obj) { |
| 2114 | Isolate* isolate = obj->GetIsolate(); |
| 2115 | InstanceType type = obj->map()->instance_type(); |
| 2116 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 2117 | DCHECK(isolate->heap_histograms()[type].name() != NULL); |
| 2118 | isolate->heap_histograms()[type].increment_number(1); |
| 2119 | isolate->heap_histograms()[type].increment_bytes(obj->Size()); |
| 2120 | |
| 2121 | if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { |
| 2122 | JSObject::cast(obj) |
| 2123 | ->IncrementSpillStatistics(isolate->js_spill_information()); |
| 2124 | } |
| 2125 | |
| 2126 | return obj->Size(); |
| 2127 | } |
| 2128 | |
| 2129 | |
| 2130 | static void ReportHistogram(Isolate* isolate, bool print_spill) { |
| 2131 | PrintF("\n Object Histogram:\n"); |
| 2132 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 2133 | if (isolate->heap_histograms()[i].number() > 0) { |
| 2134 | PrintF(" %-34s%10d (%10d bytes)\n", |
| 2135 | isolate->heap_histograms()[i].name(), |
| 2136 | isolate->heap_histograms()[i].number(), |
| 2137 | isolate->heap_histograms()[i].bytes()); |
| 2138 | } |
| 2139 | } |
| 2140 | PrintF("\n"); |
| 2141 | |
| 2142 | // Summarize string types. |
| 2143 | int string_number = 0; |
| 2144 | int string_bytes = 0; |
| 2145 | #define INCREMENT(type, size, name, camel_name) \ |
| 2146 | string_number += isolate->heap_histograms()[type].number(); \ |
| 2147 | string_bytes += isolate->heap_histograms()[type].bytes(); |
| 2148 | STRING_TYPE_LIST(INCREMENT) |
| 2149 | #undef INCREMENT |
| 2150 | if (string_number > 0) { |
| 2151 | PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, |
| 2152 | string_bytes); |
| 2153 | } |
| 2154 | |
| 2155 | if (FLAG_collect_heap_spill_statistics && print_spill) { |
| 2156 | isolate->js_spill_information()->Print(); |
| 2157 | } |
| 2158 | } |
| 2159 | #endif // DEBUG |
| 2160 | |
| 2161 | |
| 2162 | // Support for statistics gathering for --heap-stats and --log-gc. |
| 2163 | void NewSpace::ClearHistograms() { |
| 2164 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 2165 | allocated_histogram_[i].clear(); |
| 2166 | promoted_histogram_[i].clear(); |
| 2167 | } |
| 2168 | } |
| 2169 | |
| 2170 | |
| 2171 | // Because the copying collector does not touch garbage objects, we iterate |
| 2172 | // the new space before a collection to get a histogram of allocated objects. |
| 2173 | // This only happens when the --log-gc flag is set. |
| 2174 | void NewSpace::CollectStatistics() { |
| 2175 | ClearHistograms(); |
| 2176 | SemiSpaceIterator it(this); |
| 2177 | for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) |
| 2178 | RecordAllocation(obj); |
| 2179 | } |
| 2180 | |
| 2181 | |
| 2182 | static void DoReportStatistics(Isolate* isolate, HistogramInfo* info, |
| 2183 | const char* description) { |
| 2184 | LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); |
| 2185 | // Lump all the string types together. |
| 2186 | int string_number = 0; |
| 2187 | int string_bytes = 0; |
| 2188 | #define INCREMENT(type, size, name, camel_name) \ |
| 2189 | string_number += info[type].number(); \ |
| 2190 | string_bytes += info[type].bytes(); |
| 2191 | STRING_TYPE_LIST(INCREMENT) |
| 2192 | #undef INCREMENT |
| 2193 | if (string_number > 0) { |
| 2194 | LOG(isolate, |
| 2195 | HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); |
| 2196 | } |
| 2197 | |
| 2198 | // Then do the other types. |
| 2199 | for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { |
| 2200 | if (info[i].number() > 0) { |
| 2201 | LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(), |
| 2202 | info[i].bytes())); |
| 2203 | } |
| 2204 | } |
| 2205 | LOG(isolate, HeapSampleEndEvent("NewSpace", description)); |
| 2206 | } |
| 2207 | |
| 2208 | |
| 2209 | void NewSpace::ReportStatistics() { |
| 2210 | #ifdef DEBUG |
| 2211 | if (FLAG_heap_stats) { |
| 2212 | float pct = static_cast<float>(Available()) / TotalCapacity(); |
| 2213 | PrintF(" capacity: %" V8_PTR_PREFIX |
| 2214 | "d" |
| 2215 | ", available: %" V8_PTR_PREFIX "d, %%%d\n", |
| 2216 | TotalCapacity(), Available(), static_cast<int>(pct * 100)); |
| 2217 | PrintF("\n Object Histogram:\n"); |
| 2218 | for (int i = 0; i <= LAST_TYPE; i++) { |
| 2219 | if (allocated_histogram_[i].number() > 0) { |
| 2220 | PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(), |
| 2221 | allocated_histogram_[i].number(), |
| 2222 | allocated_histogram_[i].bytes()); |
| 2223 | } |
| 2224 | } |
| 2225 | PrintF("\n"); |
| 2226 | } |
| 2227 | #endif // DEBUG |
| 2228 | |
| 2229 | if (FLAG_log_gc) { |
| 2230 | Isolate* isolate = heap()->isolate(); |
| 2231 | DoReportStatistics(isolate, allocated_histogram_, "allocated"); |
| 2232 | DoReportStatistics(isolate, promoted_histogram_, "promoted"); |
| 2233 | } |
| 2234 | } |
| 2235 | |
| 2236 | |
| 2237 | void NewSpace::RecordAllocation(HeapObject* obj) { |
| 2238 | InstanceType type = obj->map()->instance_type(); |
| 2239 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 2240 | allocated_histogram_[type].increment_number(1); |
| 2241 | allocated_histogram_[type].increment_bytes(obj->Size()); |
| 2242 | } |
| 2243 | |
| 2244 | |
| 2245 | void NewSpace::RecordPromotion(HeapObject* obj) { |
| 2246 | InstanceType type = obj->map()->instance_type(); |
| 2247 | DCHECK(0 <= type && type <= LAST_TYPE); |
| 2248 | promoted_histogram_[type].increment_number(1); |
| 2249 | promoted_histogram_[type].increment_bytes(obj->Size()); |
| 2250 | } |
| 2251 | |
| 2252 | |
| 2253 | size_t NewSpace::CommittedPhysicalMemory() { |
| 2254 | if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); |
| 2255 | MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 2256 | size_t size = to_space_.CommittedPhysicalMemory(); |
| 2257 | if (from_space_.is_committed()) { |
| 2258 | size += from_space_.CommittedPhysicalMemory(); |
| 2259 | } |
| 2260 | return size; |
| 2261 | } |
| 2262 | |
| 2263 | |
| 2264 | // ----------------------------------------------------------------------------- |
| 2265 | // Free lists for old object spaces implementation |
| 2266 | |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2267 | intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
| 2268 | intptr_t free_bytes = 0; |
| 2269 | if (category->top() != NULL) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2270 | DCHECK(category->end_ != NULL); |
| 2271 | free_bytes = category->available(); |
| 2272 | if (end_ == NULL) { |
| 2273 | end_ = category->end(); |
| 2274 | } else { |
| 2275 | category->end()->set_next(top()); |
| 2276 | } |
| 2277 | set_top(category->top()); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2278 | available_ += category->available(); |
| 2279 | category->Reset(); |
| 2280 | } |
| 2281 | return free_bytes; |
| 2282 | } |
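| | |
| | // Splice sketch (comment only, hypothetical nodes): if this list holds |
| | // [C -> D] and `category` holds [A -> B], Concatenate points B->next at C |
| | // and makes A the new top, yielding [A -> B -> C -> D]; end_ still points |
| | // at D. If this list was empty, end_ is taken from the donor instead. The |
| | // donor is Reset() so its bytes are not counted twice. |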
| 2283 | |
| 2284 | |
| 2285 | void FreeListCategory::Reset() { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2286 | set_top(nullptr); |
| 2287 | set_end(nullptr); |
| 2288 | available_ = 0; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2289 | } |
| 2290 | |
| 2291 | |
| 2292 | intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2293 | intptr_t sum = 0; |
| 2294 | FreeSpace* prev_node = nullptr; |
| 2295 | for (FreeSpace* cur_node = top(); cur_node != nullptr; |
| 2296 | cur_node = cur_node->next()) { |
| 2297 | Page* page_for_node = Page::FromAddress(cur_node->address()); |
| 2298 | if (page_for_node == p) { |
| 2299 | // Found a FreeSpace node on the eviction page; unlink it. |
| 2300 | int size = cur_node->size(); |
| 2301 | sum += size; |
| 2302 | DCHECK((prev_node != nullptr) || (top() == cur_node)); |
| 2303 | if (cur_node == top()) { |
| 2304 | set_top(cur_node->next()); |
| 2305 | } |
| 2306 | if (cur_node == end()) { |
| 2307 | set_end(prev_node); |
| 2308 | } |
| 2309 | if (prev_node != nullptr) { |
| 2310 | prev_node->set_next(cur_node->next()); |
| 2311 | } |
| 2312 | continue; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2313 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2314 | prev_node = cur_node; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2315 | } |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2316 | DCHECK_EQ(p->available_in_free_list(type_), sum); |
| 2317 | p->add_available_in_free_list(type_, -sum); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2318 | available_ -= sum; |
| 2319 | return sum; |
| 2320 | } |
| 2321 | |
| 2322 | |
| 2323 | bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2324 | FreeSpace* node = top(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2325 | while (node != NULL) { |
| 2326 | if (Page::FromAddress(node->address()) == p) return true; |
| 2327 | node = node->next(); |
| 2328 | } |
| 2329 | return false; |
| 2330 | } |
| 2331 | |
| 2332 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2333 | FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) { |
| 2334 | FreeSpace* node = top(); |
| 2335 | if (node == nullptr) return nullptr; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2336 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2337 | Page* page = Page::FromAddress(node->address()); |
| 2338 | while ((node != nullptr) && !page->CanAllocate()) { |
| 2339 | available_ -= node->size(); |
| 2340 | page->add_available_in_free_list(type_, -(node->Size())); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2341 | node = node->next(); |
| | // Consecutive nodes can live on different pages, so recompute the page |
| | // before re-testing CanAllocate(); otherwise the per-page free-list |
| | // accounting above could be charged to the wrong page. |
| | if (node != nullptr) page = Page::FromAddress(node->address()); |
| 2342 | } |
| 2343 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2344 | if (node != nullptr) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2345 | set_top(node->next()); |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2346 | *node_size = node->Size(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2347 | available_ -= *node_size; |
| 2348 | } else { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2349 | set_top(nullptr); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2350 | } |
| 2351 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2352 | if (top() == nullptr) { |
| 2353 | set_end(nullptr); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2354 | } |
| 2355 | |
| 2356 | return node; |
| 2357 | } |
| 2358 | |
| 2359 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2360 | FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes, |
| 2361 | int* node_size) { |
| 2362 | FreeSpace* node = PickNodeFromList(node_size); |
| 2363 | if ((node != nullptr) && (*node_size < size_in_bytes)) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2364 | Free(node, *node_size); |
| 2365 | *node_size = 0; |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2366 | return nullptr; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2367 | } |
| 2368 | return node; |
| 2369 | } |
| 2370 | |
| 2371 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2372 | FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes, |
| 2373 | int* node_size) { |
| 2374 | FreeSpace* prev_non_evac_node = nullptr; |
| 2375 | for (FreeSpace* cur_node = top(); cur_node != nullptr; |
| 2376 | cur_node = cur_node->next()) { |
| 2377 | int size = cur_node->size(); |
| 2378 | Page* page_for_node = Page::FromAddress(cur_node->address()); |
| 2379 | |
| 2380 | if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) { |
| 2381 | // The node is either large enough or contained in an evacuation |
| 2382 | // candidate. In both cases we need to unlink it from the list. |
| 2383 | available_ -= size; |
| 2384 | if (cur_node == top()) { |
| 2385 | set_top(cur_node->next()); |
| 2386 | } |
| 2387 | if (cur_node == end()) { |
| 2388 | set_end(prev_non_evac_node); |
| 2389 | } |
| 2390 | if (prev_non_evac_node != nullptr) { |
| 2391 | prev_non_evac_node->set_next(cur_node->next()); |
| 2392 | } |
| 2393 | // For evacuation candidates we continue. |
| 2394 | if (!page_for_node->CanAllocate()) { |
| 2395 | page_for_node->add_available_in_free_list(type_, -size); |
| 2396 | continue; |
| 2397 | } |
| 2398 | // Otherwise we have a large enough node and can return. |
| 2399 | *node_size = size; |
| 2400 | return cur_node; |
| 2401 | } |
| 2402 | |
| 2403 | prev_non_evac_node = cur_node; |
| 2404 | } |
| 2405 | return nullptr; |
| 2406 | } |
| 2407 | |
| 2408 | |
| 2409 | void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) { |
| 2410 | free_space->set_next(top()); |
| 2411 | set_top(free_space); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2412 | if (end_ == NULL) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2413 | end_ = free_space; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2414 | } |
| 2415 | available_ += size_in_bytes; |
| 2416 | } |
| 2417 | |
| 2418 | |
| 2419 | void FreeListCategory::RepairFreeList(Heap* heap) { |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2420 | FreeSpace* n = top(); |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2421 | while (n != NULL) { |
| 2422 | Map** map_location = reinterpret_cast<Map**>(n->address()); |
| 2423 | if (*map_location == NULL) { |
| 2424 | *map_location = heap->free_space_map(); |
| 2425 | } else { |
| 2426 | DCHECK(*map_location == heap->free_space_map()); |
| 2427 | } |
| 2428 | n = n->next(); |
| 2429 | } |
| 2430 | } |
| 2431 | |
| 2432 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2433 | FreeList::FreeList(PagedSpace* owner) |
| 2434 | : owner_(owner), |
| 2435 | wasted_bytes_(0), |
| 2436 | small_list_(this, kSmall), |
| 2437 | medium_list_(this, kMedium), |
| 2438 | large_list_(this, kLarge), |
| 2439 | huge_list_(this, kHuge) { |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2440 | Reset(); |
| 2441 | } |
| 2442 | |
| 2443 | |
Ben Murdoch | 4a90d5f | 2016-03-22 12:00:34 +0000 | [diff] [blame^] | 2444 | intptr_t FreeList::Concatenate(FreeList* other) { |
| 2445 | intptr_t usable_bytes = 0; |
| 2446 | intptr_t wasted_bytes = 0; |
| 2447 | |
| 2448 | // This is safe (not going to deadlock) since Concatenate operations |
| 2449 | // are never performed on the same free lists at the same time in |
| 2450 | // reverse order. Furthermore, we only lock if the PagedSpace containing |
| 2451 | // the free list is known to be globally available, i.e., not local. |
| 2452 | if (!owner()->is_local()) mutex_.Lock(); |
| 2453 | if (!other->owner()->is_local()) other->mutex()->Lock(); |
| 2454 | |
| 2455 | wasted_bytes = other->wasted_bytes_; |
| 2456 | wasted_bytes_ += wasted_bytes; |
| 2457 | other->wasted_bytes_ = 0; |
| 2458 | |
| 2459 | usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall)); |
| 2460 | usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium)); |
| 2461 | usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge)); |
| 2462 | usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge)); |
| 2463 | |
| 2464 | if (!other->owner()->is_local()) other->mutex()->Unlock(); |
| 2465 | if (!owner()->is_local()) mutex_.Unlock(); |
| 2466 | return usable_bytes + wasted_bytes; |
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 2467 | } |
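
// Typical use (a sketch under assumed callers, not code from this file): a
// CompactionSpace evacuates objects through its own local free list, and when
// compaction finishes the owning space merges that list back, roughly
//
//   old_space_free_list->Concatenate(compaction_space_free_list);
//
// Only the non-local side takes its mutex, which is why the comment above can
// argue there is no lock-order inversion.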


void FreeList::Reset() {
  small_list_.Reset();
  medium_list_.Reset();
  large_list_.Reset();
  huge_list_.Reset();
  ResetStats();
}


int FreeList::Free(Address start, int size_in_bytes) {
  if (size_in_bytes == 0) return 0;

  owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);

  Page* page = Page::FromAddress(start);

  // Early return to drop too-small blocks on the floor.
  if (size_in_bytes <= kSmallListMin) {
    page->add_non_available_small_blocks(size_in_bytes);
    wasted_bytes_ += size_in_bytes;
    return size_in_bytes;
  }

  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  if (size_in_bytes <= kSmallListMax) {
    small_list_.Free(free_space, size_in_bytes);
    page->add_available_in_small_free_list(size_in_bytes);
  } else if (size_in_bytes <= kMediumListMax) {
    medium_list_.Free(free_space, size_in_bytes);
    page->add_available_in_medium_free_list(size_in_bytes);
  } else if (size_in_bytes <= kLargeListMax) {
    large_list_.Free(free_space, size_in_bytes);
    page->add_available_in_large_free_list(size_in_bytes);
  } else {
    huge_list_.Free(free_space, size_in_bytes);
    page->add_available_in_huge_free_list(size_in_bytes);
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return 0;
}
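
// Routing example (the category bounds kSmallListMin/kSmallListMax/
// kMediumListMax/kLargeListMax live in spaces.h; the byte values below are
// assumptions for illustration only):
//
//   free_list->Free(addr, 24);    // below kSmallListMin: wasted, returns 24
//   free_list->Free(addr, 128);   // small category, returns 0
//   free_list->Free(addr, 2048);  // medium or large, depending on the bounds
//
// A non-zero return value therefore means "bytes dropped on the floor", not
// an error.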


FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category,
                                int* node_size) {
  FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
  if (node != nullptr) {
    Page::FromAddress(node->address())
        ->add_available_in_free_list(category, -(*node_size));
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
  }
  return node;
}


FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
  FreeSpace* node = nullptr;
  Page* page = nullptr;

  if (size_in_bytes <= kSmallAllocationMax) {
    node = FindNodeIn(kSmall, node_size);
    if (node != nullptr) return node;
  }

  if (size_in_bytes <= kMediumAllocationMax) {
    node = FindNodeIn(kMedium, node_size);
    if (node != nullptr) return node;
  }

  if (size_in_bytes <= kLargeAllocationMax) {
    node = FindNodeIn(kLarge, node_size);
    if (node != nullptr) return node;
  }

  node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
  if (node != nullptr) {
    page = Page::FromAddress(node->address());
    // The node was unlinked from the huge list, so it is the page's
    // huge-free-list counter that must be decremented.
    page->add_available_in_huge_free_list(-(*node_size));
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
    return node;
  }

  if (size_in_bytes <= kSmallListMax) {
    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_small_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kMediumListMax) {
    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_medium_free_list(-(*node_size));
    }
  } else if (size_in_bytes <= kLargeListMax) {
    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
    if (node != NULL) {
      DCHECK(size_in_bytes <= *node_size);
      page = Page::FromAddress(node->address());
      page->add_available_in_large_free_list(-(*node_size));
    }
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}
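
// Lookup strategy, summarized: the three FindNodeIn() calls above are O(1)
// first-fit probes, each attempted only when the request is small enough that
// *any* node in that category is guaranteed to fit (the k*AllocationMax
// bounds are assumed to line up with the category minimums in spaces.h). Only
// the huge list is searched linearly; the final PickNodeFromList() walk then
// does an exact size check within the category matching the request.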


FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
  hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
  base::LockGuard<base::Mutex> guard(&mutex_);
  FreeSpace* node = nullptr;
  int node_size = 0;
  // Try to find a node that fits exactly.
  node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
  // If no node could be found get as much memory as possible.
  if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
  if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
  if (node != nullptr) {
    // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
    // size larger than the minimum size required for FreeSpace, and (b) to
    // get a block that can actually be freed into some FreeList later on.
    if (hint_size_in_bytes <= kSmallListMin) {
      hint_size_in_bytes = kSmallListMin + kPointerSize;
    }
    // Give back leftovers that were not required by {hint_size_in_bytes}.
    intptr_t left_over = node_size - hint_size_in_bytes;

    // Do not bother to return anything below {kSmallListMin} as it would be
    // immediately discarded anyway.
    if (left_over > kSmallListMin) {
      Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
      node->set_size(static_cast<int>(hint_size_in_bytes));
    }
  }
  return node;
}
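
// Usage sketch (assumed caller, for illustration): a compaction space that
// runs out of room can try to steal a chunk from the main space's free list
// before allocating a new page, roughly
//
//   FreeSpace* chunk = main_free_list->TryRemoveMemory(32 * KB);
//   if (chunk != nullptr) AddStolenMemory(chunk);  // hypothetical helper
//
// The hint is only a hint: the returned block may be smaller (best effort
// from the huge/large lists) or slightly larger (a leftover below the
// kSmallListMin threshold is kept rather than re-freed).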


// Allocation on the old space free list. If it succeeds then a new linear
// allocation space has been set up with the top and limit of the space. If
// the allocation fails then nullptr is returned, and the caller can perform
// a GC or allocate a new page before retrying.
HeapObject* FreeList::Allocate(int size_in_bytes) {
  DCHECK(0 < size_in_bytes);
  DCHECK(size_in_bytes <= kMaxBlockSize);
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  // Don't free list allocate if there is linear space available.
  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);

  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  owner_->Free(owner_->top(), old_linear_size);
  owner_->SetTopAndLimit(nullptr, nullptr);

  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
                                                      old_linear_size);

  int new_node_size = 0;
  FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
  if (new_node == nullptr) return nullptr;

  int bytes_left = new_node_size - size_in_bytes;
  DCHECK(bytes_left >= 0);

#ifdef DEBUG
  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
    reinterpret_cast<Object**>(new_node->address())[i] =
        Smi::FromInt(kCodeZapValue);
  }
#endif

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  const int kThreshold = IncrementalMarking::kAllocatedThreshold;

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  owner_->Allocate(new_node_size);

  if (owner_->heap()->inline_allocation_disabled()) {
    // Keep the linear allocation area empty if requested to do so; just
    // return the area to the free list instead.
    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
  } else if (bytes_left > kThreshold &&
             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
             FLAG_incremental_marking) {
    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
    // We don't want to give too large linear areas to the allocator while
    // incremental marking is going on, because we won't check again whether
    // we want to do another increment until the linear area is used up.
    owner_->Free(new_node->address() + size_in_bytes + linear_size,
                 new_node_size - size_in_bytes - linear_size);
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + size_in_bytes + linear_size);
  } else if (bytes_left > 0) {
    // Normally we give the rest of the node to the allocator as its new
    // linear allocation area.
    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
                           new_node->address() + new_node_size);
  }

  return new_node;
}
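
// Worked example of the split above (numbers are assumptions, pointer size
// 8): a request for size_in_bytes == 64 served by a 256-byte node gives
// bytes_left == 192. With inline allocation enabled and marking not
// incomplete, the linear area becomes [node + 64, node + 256), so the next
// ~192 bytes of allocations are bump-pointer allocations with no free-list
// walk at all.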


intptr_t FreeList::EvictFreeListItems(Page* p) {
  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
  if (sum < p->area_size()) {
    sum += small_list_.EvictFreeListItemsInList(p) +
           medium_list_.EvictFreeListItemsInList(p) +
           large_list_.EvictFreeListItemsInList(p);
  }
  return sum;
}


bool FreeList::ContainsPageFreeListItems(Page* p) {
  // This is a read-only query; use the Contains* helpers rather than the
  // evicting ones so the lists are left untouched.
  return huge_list_.ContainsPageFreeListItemsInList(p) ||
         small_list_.ContainsPageFreeListItemsInList(p) ||
         medium_list_.ContainsPageFreeListItemsInList(p) ||
         large_list_.ContainsPageFreeListItemsInList(p);
}


void FreeList::RepairLists(Heap* heap) {
  small_list_.RepairFreeList(heap);
  medium_list_.RepairFreeList(heap);
  large_list_.RepairFreeList(heap);
  huge_list_.RepairFreeList(heap);
}


#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
  intptr_t sum = 0;
  FreeSpace* cur = top();
  while (cur != NULL) {
    DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
    sum += cur->nobarrier_size();
    cur = cur->next();
  }
  return sum;
}


int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeSpace* cur = top();
  while (cur != NULL) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}


bool FreeListCategory::IsVeryLong() {
  return FreeListLength() == kVeryLongFreeList;
}


bool FreeList::IsVeryLong() {
  return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
         large_list_.IsVeryLong() || huge_list_.IsVeryLong();
}


// This can take a very long time because it is linear in the number of
// entries on the free list, so it should not be called if FreeListLength
// returns kVeryLongFreeList.
intptr_t FreeList::SumFreeLists() {
  intptr_t sum = small_list_.SumFreeList();
  sum += medium_list_.SumFreeList();
  sum += large_list_.SumFreeList();
  sum += huge_list_.SumFreeList();
  return sum;
}
#endif


// -----------------------------------------------------------------------------
// OldSpace implementation

void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be
  // restored on the first allocation after the sweep.
  EmptyAllocationInfo();

  // Clear the free list before a full GC; it will be rebuilt afterward.
  free_list_.Reset();
}


intptr_t PagedSpace::SizeOfObjects() {
  const intptr_t size = Size() - (limit() - top());
  CHECK_GE(limit(), top());
  CHECK_GE(size, 0);
  USE(size);
  return size;
}


// After we have booted, we have created a map which represents free space
// on the heap. If there was already a free list then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
void PagedSpace::RepairFreeListsAfterDeserialization() {
  free_list_.RepairLists(heap());
  // Each page may have a small free space that is not tracked by a free list.
  // Update the maps for those free space objects.
  PageIterator iterator(this);
  while (iterator.has_next()) {
    Page* page = iterator.next();
    int size = static_cast<int>(page->non_available_small_blocks());
    if (size == 0) continue;
    Address address = page->OffsetToAddress(Page::kPageSize - size);
    heap()->CreateFillerObjectAt(address, size);
  }
}


void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
  if (allocation_info_.top() >= allocation_info_.limit()) return;

  if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
    // Create filler object to keep page iterable if it was iterable.
    int remaining =
        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
    allocation_info_.Reset(nullptr, nullptr);
  }
}


HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return free_list_.Allocate(size_in_bytes);
  }
  return nullptr;
}


HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    collector->SweepAndRefill(this);
    return free_list_.Allocate(size_in_bytes);
  }
  return nullptr;
}


HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
  // Allocation in this space has failed.

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    // First try to refill the free-list, concurrent sweeper threads
    // may have freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    HeapObject* object = free_list_.Allocate(size_in_bytes);
    if (object != NULL) return object;

    // If sweeping is still in progress try to sweep pages on the main thread.
    collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
    RefillFreeList();
    object = free_list_.Allocate(size_in_bytes);
    if (object != nullptr) return object;
  }

  // Free list allocation failed and there is no next page. Fail if we have
  // hit the old generation size limit that should cause a garbage
  // collection.
  if (!heap()->always_allocate() &&
      heap()->OldGenerationAllocationLimitReached()) {
    // If sweeper threads are active, wait for them at that point and steal
    // elements from their free-lists.
    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
    return object;
  }

  // Try to expand the space and allocate in the new next page.
  if (Expand()) {
    DCHECK((CountTotalPages() > 1) ||
           (size_in_bytes <= free_list_.Available()));
    return free_list_.Allocate(size_in_bytes);
  }

  // If sweeper threads are active, wait for them at that point and steal
  // elements from their free-lists. Allocation may still fail here, which
  // would indicate that there is not enough memory for the given allocation.
  return SweepAndRetryAllocation(size_in_bytes);
}
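
// The slow path above is a ladder of increasingly expensive fallbacks:
// refill from concurrent sweepers, sweep some pages on the main thread, give
// up early if a GC is due, expand the space, and finally block on the
// sweepers. Rough shape of a caller (illustrative only, not this file's
// code):
//
//   HeapObject* obj = free_list_.Allocate(size);
//   if (obj == nullptr) obj = SlowAllocateRaw(size);
//   if (obj == nullptr) { /* caller triggers a GC and retries */ }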


#ifdef DEBUG
void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ReportCodeKindStatistics(isolate->code_kind_statistics());
  PrintF(
      "Code comment statistics (\" [ comment-txt : size/ "
      "count (average)\"):\n");
  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
    const CommentStatistic& cs = comments_statistics[i];
    if (cs.size > 0) {
      PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
             cs.size / cs.count);
    }
  }
  PrintF("\n");
}


void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  ClearCodeKindStatistics(isolate->code_kind_statistics());
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    comments_statistics[i].Clear();
  }
  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
  comments_statistics[CommentStatistic::kMaxComments].size = 0;
  comments_statistics[CommentStatistic::kMaxComments].count = 0;
}


// Adds a comment to the 'comments_statistics' table. Performance OK as long
// as 'kMaxComments' is small.
static void EnterComment(Isolate* isolate, const char* comment, int delta) {
  CommentStatistic* comments_statistics =
      isolate->paged_space_comments_statistics();
  // Do not count empty comments.
  if (delta <= 0) return;
  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
  // Search for a free or matching entry in 'comments_statistics': 'cs'
  // points to result.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    if (comments_statistics[i].comment == NULL) {
      cs = &comments_statistics[i];
      cs->comment = comment;
      break;
    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
      cs = &comments_statistics[i];
      break;
    }
  }
  // Update entry for 'comment'.
  cs->size += delta;
  cs->count += 1;
}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for end of nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, so the loop is
    // guaranteed to exit.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered with previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}
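
// Attribution example (illustrative): for a comment structure like
//
//   [ OuterComment
//     ...code A...
//     [ InnerComment ...code B... ]
//     ...code C...
//   ]
//
// the recursive call charges the bytes of B to InnerComment, while
// flat_delta for OuterComment accumulates only A and C, so no instruction
// bytes are counted twice.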


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", waste: %" V8_PTR_PREFIX
         "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
    heap()->mark_compact_collector()->EnsureSweepingCompleted();
  }
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace


LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(HashMap::PointersMatch, 1024) {}


LargeObjectSpace::~LargeObjectSpace() {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  AccountCommitted(static_cast<intptr_t>(page->size()));
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}
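
// Worked example of the chunk-map keying above (assuming 1 MB chunk
// alignment for illustration; the real value is MemoryChunk::kAlignment): a
// 3.5 MB large page at address 0x40000000 covers keys 0x400 through 0x403,
// so a FindPage() query for any interior address divides down to one of
// those keys and resolves to this page in O(1).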


size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}


LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key));
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(Marking::IsBlack(mark_bit));
    Marking::BlackToWhite(mark_bit);
    Page::FromAddress(object->address())->ResetProgressBar();
    Page::FromAddress(object->address())->ResetLiveBytes();
    current = current->next_page();
  }
}


void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(!Marking::IsGrey(mark_bit));
    if (Marking::IsBlack(mark_bit)) {
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      AccountUncommitted(static_cast<intptr_t>(page->size()));
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page. The local alignment variable
      // keeps the lines below within the 80-character limit checked by
      // tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      heap()->QueueMemoryChunkForFree(page);
    }
  }
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}


bool LargeObjectSpace::Contains(Address address) {
  return FindPage(address) != NULL;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings (sequential
    // strings that have been morphed into external strings), fixed arrays,
    // fixed double arrays, and byte arrays in the large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      "  number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8