// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->SweepingDone());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()
      ->mark_compact_collector()
      ->sweeper()
      .SweepOrWaitUntilSweepingCompleted(cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
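
// Usage sketch: a typical walk over every object in a paged space. As in
// PagedSpace::FindObject below, Next() is expected to return NULL once the
// last page has been exhausted, and the space must be iterable (swept).
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Inspect obj.
//   }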
76
Ben Murdoch097c5b22016-05-18 11:27:45 +010077PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
78 : heap_(heap) {
79 AllSpaces spaces(heap_);
80 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
81 space->PauseAllocationObservers();
82 }
83}
84
85PauseAllocationObserversScope::~PauseAllocationObserversScope() {
86 AllSpaces spaces(heap_);
87 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
88 space->ResumeAllocationObservers();
89 }
90}
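
// Usage sketch: PauseAllocationObserversScope is a plain RAII helper, so
// observers stay paused for exactly the lifetime of the local object.
//
//   {
//     PauseAllocationObserversScope pause_observers(heap);
//     // Allocations here do not notify allocation observers.
//   }  // Observers are resumed here.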

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
  // encoded immediate, the addresses have to be in range of 256Mb aligned
  // region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes guards while commit_size does not. Make sure
  // callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}
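
// Sketch of the invariant checked above: requested_size covers the committed
// area plus the two code-page guards. Assuming a 4 KB commit page size (and
// therefore 4 KB guards):
//
//   size_t guard = MemoryAllocator::CodePageGuardSize();  // 4 KB here.
//   size_t requested_size = 64 * KB;
//   size_t max_commit_size = requested_size - 2 * guard;  // 56 KB.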


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
                                                            EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}


bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      code_range_(nullptr),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)),
      unmapper_(this) {}

bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
                            intptr_t code_range_size) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  code_range_ = new CodeRange(isolate_);
  if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;

  return true;
}


void MemoryAllocator::TearDown() {
  unmapper()->WaitUntilCompleted();

  MemoryChunk* chunk = nullptr;
  while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
               NOT_EXECUTABLE);
  }

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_.Value(), 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Release();
  }

  delete code_range_;
  code_range_ = nullptr;
}

class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
 public:
  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}

 private:
  // v8::Task overrides.
  void Run() override {
    unmapper_->PerformFreeMemoryOnQueuedChunks();
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
  }

  Unmapper* unmapper_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};

void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (FLAG_concurrent_sweeping) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
    concurrent_unmapping_tasks_active_++;
  } else {
    PerformFreeMemoryOnQueuedChunks();
  }
}

bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
  bool waited = false;
  while (concurrent_unmapping_tasks_active_ > 0) {
    pending_unmapping_tasks_semaphore_.Wait();
    concurrent_unmapping_tasks_active_--;
    waited = true;
  }
  return waited;
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  // Non-regular chunks.
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}
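
// Sketch of the intended producer/consumer flow around the Unmapper, based on
// the calls above and in MemoryAllocator::TearDown:
//
//   unmapper()->AddMemoryChunkSafe(chunk);  // Queue a pre-freed chunk.
//   unmapper()->FreeQueuedChunks();         // Unmap on a background task, or
//                                           // inline without
//                                           // FLAG_concurrent_sweeping.
//   unmapper()->WaitUntilCompleted();       // Join outstanding unmap tasks.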

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(code_range() == NULL ||
         !code_range()->contains(static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
         !code_range()->valid() || reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (code_range() != NULL &&
      code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || code_range() == NULL ||
           !code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}

Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}

void Page::InitializeAsAnchor(Space* space) {
  set_owner(space);
  set_next_chunk(this);
  set_prev_chunk(this);
  SetFlags(0, ~0);
  SetFlag(ANCHOR);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->old_to_new_slots_ = nullptr;
  chunk->old_to_old_slots_ = nullptr;
  chunk->typed_old_to_old_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->mutex_ = nullptr;
  chunk->available_in_free_list_ = 0;
  chunk->wasted_memory_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->memory_allocator()->CommitMemory(start, length,
                                                    executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (code_range() != NULL && code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (code_range() != NULL && code_range()->valid()) {
#endif
      base =
          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_.TakeControl(&reservation);
    UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
                  last_chunk_.size());
    size_.Increment(-static_cast<intptr_t>(chunk_size));
    if (executable == EXECUTABLE) {
      size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}
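
// Sketch of the non-executable layout drawn above, assuming the reservation
// succeeded and the chunk is not the last one in the address space:
//
//   chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
//                        base::OS::CommitPageSize());
//   area_start = base + Page::kObjectStartOffset;
//   area_end   = area_start + commit_area_size;  // <= base + chunk_size.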


void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
  available_in_free_list_ = 0;
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
  } else {
    if (reservation->IsReserved()) {
      FreeMemory(reservation, chunk->executable());
    } else {
      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
    }
  }
}

template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
    // Fall through to kPreFreeAndQueue.
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
    default:
      UNREACHABLE();
  }
}

template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
    MemoryChunk* chunk);
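
// Usage sketch for the three instantiations above, mirroring call sites later
// in this file:
//
//   allocator->Free<MemoryAllocator::kFull>(chunk);            // Unmap now.
//   allocator->Free<MemoryAllocator::kPreFreeAndQueue>(page);  // Hand off to
//                                                              // the Unmapper.
//   allocator->Free<MemoryAllocator::kPooledAndQueue>(chunk);  // Page-sized,
//                                                              // non-executable
//                                                              // chunks only.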

template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}

template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    intptr_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    intptr_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    intptr_t size, SemiSpace* owner, Executability executable);

LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
}

template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start = start + MemoryChunk::kObjectStartOffset;
  const Address area_end = start + size;
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  base::VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
  return chunk;
}

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}
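
// Sketch of the code-page layout produced by CommitExecutableMemory, using the
// offsets defined by the helpers above:
//
//   start
//   | header (writable), CodePageGuardStartOffset bytes
//   | guard page, CodePageGuardSize bytes
//   | body (executable), committed up to commit_size
//   | reserved but uncommitted tail
//   | guard page at start + reserved_size - CodePageGuardSize()
//   start + reserved_size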


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemory() {
  if (skip_list_ != nullptr) {
    delete skip_list_;
    skip_list_ = nullptr;
  }
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
}

static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK(pages > 0);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

void MemoryChunk::AllocateOldToNewSlots() {
  DCHECK(nullptr == old_to_new_slots_);
  old_to_new_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToNewSlots() {
  delete[] old_to_new_slots_;
  old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateOldToOldSlots() {
  DCHECK(nullptr == old_to_old_slots_);
  old_to_old_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToOldSlots() {
  delete[] old_to_old_slots_;
  old_to_old_slots_ = nullptr;
}

void MemoryChunk::AllocateTypedOldToOldSlots() {
  DCHECK(nullptr == typed_old_to_old_slots_);
  typed_old_to_old_slots_ = new TypedSlotSet(address());
}

void MemoryChunk::ReleaseTypedOldToOldSlots() {
  delete typed_old_to_old_slots_;
  typed_old_to_old_slots_ = nullptr;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);

void Space::AllocationStep(Address soon_object, int size) {
  if (!allocation_observers_paused_) {
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
    }
  }
}
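
// Sketch of how allocation paths are expected to drive the observers: notify
// them just before handing out an object, roughly
//
//   AllocationStep(soon_object, size_in_bytes);
//   // ... then initialize the object at soon_object ...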

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable), anchor_(this), free_list_(this) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  intptr_t added = 0;
  {
    Page* p = nullptr;
    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
      // Only during compaction pages can actually change ownership. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local() && (p->owner() != this)) {
        base::LockGuard<base::Mutex> guard(
            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
        p->Unlink();
        p->set_owner(this);
        p->InsertAfter(anchor_.prev_page());
      }
      added += RelinkFreeListCategories(p);
      added += p->wasted_memory();
      if (is_local() && (added > kCompactionMemoryWanted)) break;
    }
  }
  accounting_stats_.IncreaseCapacity(added);
}

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  //   anchor_

  other->EmptyAllocationInfo();

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();

    // Relinking requires the category to be unlinked.
    other->UnlinkFreeListCategories(p);

    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
    RelinkFreeListCategories(p);
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::Expand() {
  int size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!heap()->CanExpandOldGeneration(size)) return false;

  Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (p == nullptr) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  // When incremental marking was activated, old space pages are allocated
  // black.
  if (heap()->incremental_marking()->black_allocation() &&
      identity() == OLD_SPACE) {
    Bitmap::SetAllBits(p);
    p->SetFlag(Page::BLACK_PAGE);
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
    }
  }

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(AreaSize(), page->area_size());
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
          Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation

bool NewSpace::SetUp(int initial_semispace_capacity,
                     int maximum_semispace_capacity) {
  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetAllocationInfo();

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  allocation_info_.Reset(nullptr, nullptr);

  to_space_.TearDown();
  from_space_.TearDown();
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001437 }
1438 }
1439 }
1440 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1441}
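// Illustrative numbers for Grow(): with the default --semi_space_growth_factor
// of 2, semispaces currently holding 1 MB each are grown to 2 MB, then 4 MB on
// the next call, until MaximumCapacity() is reached. Growing from-space may
// fail independently; in that case the code above shrinks to-space back so
// both semispaces keep the same capacity.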
1442
1443
1444void NewSpace::Shrink() {
1445 int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1446 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1447 if (rounded_new_capacity < TotalCapacity() &&
1448 to_space_.ShrinkTo(rounded_new_capacity)) {
1449 // Only shrink from-space if we managed to shrink to-space.
1450 from_space_.Reset();
1451 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1452 // If we managed to shrink to-space but couldn't shrink from
1453 // space, attempt to grow to-space again.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001454 if (!to_space_.GrowTo(from_space_.current_capacity())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001455 // We are in an inconsistent state because we could not
1456 // commit/uncommit memory from new space.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001457 CHECK(false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001458 }
1459 }
1460 }
1461 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1462}
1463
1464
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001465void LocalAllocationBuffer::Close() {
1466 if (IsValid()) {
1467 heap_->CreateFillerObjectAt(
1468 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001469 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1470 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001471 }
1472}
1473
1474
1475LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1476 AllocationInfo allocation_info)
1477 : heap_(heap), allocation_info_(allocation_info) {
1478 if (IsValid()) {
1479 heap_->CreateFillerObjectAt(
1480 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001481 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1482 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001483 }
1484}
1485
1486
1487LocalAllocationBuffer::LocalAllocationBuffer(
1488 const LocalAllocationBuffer& other) {
1489 *this = other;
1490}
1491
1492
1493LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1494 const LocalAllocationBuffer& other) {
1495 Close();
1496 heap_ = other.heap_;
1497 allocation_info_ = other.allocation_info_;
1498
 1499  // This is needed since we (a) cannot yet use move-semantics, (b) want to
 1500  // make the class easy to use by passing it around by value, and (c) want
 1501  // {Close} to be called implicitly upon copy.
1502 const_cast<LocalAllocationBuffer&>(other)
1503 .allocation_info_.Reset(nullptr, nullptr);
1504 return *this;
1505}
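// A minimal usage sketch for LocalAllocationBuffer (illustrative only, not
// part of this file; it assumes the FromResult()/AllocateRawAligned() helpers
// declared alongside this class, and kLabSize/object_size are made-up names):
//
//   AllocationResult result =
//       new_space->AllocateRawSynchronized(kLabSize, kWordAligned);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
//   if (lab.IsValid()) {
//     AllocationResult object =
//         lab.AllocateRawAligned(object_size, kWordAligned);
//     // ... initialize the object ...
//   }
//   // Reassigning or destroying |lab| runs Close(), which fills any unused
//   // tail with a filler object so the hosting page stays iterable.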
1506
1507
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001508void NewSpace::UpdateAllocationInfo() {
1509 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001510 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001511 UpdateInlineAllocationLimit(0);
1512 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1513}
1514
1515
1516void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001517 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001518 to_space_.Reset();
1519 UpdateAllocationInfo();
1520 pages_used_ = 0;
1521 // Clear all mark-bits in the to-space.
1522 NewSpacePageIterator it(&to_space_);
1523 while (it.has_next()) {
1524 Bitmap::Clear(it.next());
1525 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001526 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001527}
1528
1529
1530void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1531 if (heap()->inline_allocation_disabled()) {
1532 // Lowest limit when linear allocation was disabled.
1533 Address high = to_space_.page_high();
1534 Address new_top = allocation_info_.top() + size_in_bytes;
1535 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001536 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001537 // Normal limit is the end of the current page.
1538 allocation_info_.set_limit(to_space_.page_high());
1539 } else {
1540 // Lower limit during incremental marking.
1541 Address high = to_space_.page_high();
1542 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001543 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001544 allocation_info_.set_limit(Min(new_limit, high));
1545 }
1546 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1547}
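// Example of the lowered limit (illustrative): with a single observer whose
// next step is 1 KB away and size_in_bytes == 32, the limit becomes
// min(top + 32 + 1024 - 1, page_high), so bump-pointer allocation drops back
// into EnsureAllocation() roughly every 1 KB and the observer gets a chance
// to run.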
1548
1549
1550bool NewSpace::AddFreshPage() {
1551 Address top = allocation_info_.top();
Ben Murdochc5610432016-08-08 18:44:38 +01001552 DCHECK(!Page::IsAtObjectStart(top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001553 if (!to_space_.AdvancePage()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001554 // No more pages left to advance.
1555 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001556 }
1557
1558 // Clear remainder of current page.
Ben Murdochc5610432016-08-08 18:44:38 +01001559 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001560 if (heap()->gc_state() == Heap::SCAVENGE) {
1561 heap()->promotion_queue()->SetNewLimit(limit);
1562 }
1563
1564 int remaining_in_page = static_cast<int>(limit - top);
Ben Murdochda12d292016-06-02 14:46:10 +01001565 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001566 pages_used_++;
1567 UpdateAllocationInfo();
1568
1569 return true;
1570}
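// Example (illustrative): if only 40 bytes remain between top and the current
// page's area_end when a larger request arrives, AddFreshPage() turns those
// 40 bytes into a filler object, bumps pages_used_, and moves the allocation
// info to the start of the next to-space page.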
1571
1572
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001573bool NewSpace::AddFreshPageSynchronized() {
1574 base::LockGuard<base::Mutex> guard(&mutex_);
1575 return AddFreshPage();
1576}
1577
1578
1579bool NewSpace::EnsureAllocation(int size_in_bytes,
1580 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001581 Address old_top = allocation_info_.top();
1582 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001583 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1584 int aligned_size_in_bytes = size_in_bytes + filler_size;
1585
Ben Murdochc5610432016-08-08 18:44:38 +01001586 if (old_top + aligned_size_in_bytes > high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001587 // Not enough room in the page, try to allocate a new one.
1588 if (!AddFreshPage()) {
1589 return false;
1590 }
1591
1592 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1593
1594 old_top = allocation_info_.top();
1595 high = to_space_.page_high();
1596 filler_size = Heap::GetFillToAlign(old_top, alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001597 }
1598
Ben Murdochc5610432016-08-08 18:44:38 +01001599 DCHECK(old_top + aligned_size_in_bytes <= high);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001600
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001601 if (allocation_info_.limit() < high) {
1602 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001603 // or because incremental marking wants to get a chance to do a step,
 1604    // or because the idle scavenge job wants to get a chance to post a task.
1605 // Set the new limit accordingly.
1606 Address new_top = old_top + aligned_size_in_bytes;
1607 Address soon_object = old_top + filler_size;
1608 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1609 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1610 }
1611 return true;
1612}
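// Alignment sketch (illustrative): for a kDoubleAligned request on a build
// where old_top happens to be only word aligned, Heap::GetFillToAlign(old_top,
// kDoubleAligned) returns kPointerSize, so aligned_size_in_bytes grows by one
// word and the object is later preceded by a one-word filler at old_top.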
1613
1614
1615void NewSpace::StartNextInlineAllocationStep() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001616 if (!allocation_observers_paused_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001617 top_on_previous_step_ =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001618 allocation_observers_->length() ? allocation_info_.top() : 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001619 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001620 }
1621}
1622
1623
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001624intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1625 intptr_t next_step = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001626 for (int i = 0; i < allocation_observers_->length(); ++i) {
1627 AllocationObserver* o = (*allocation_observers_)[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001628 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1629 : o->bytes_to_next_step();
1630 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001631 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001632 return next_step;
1633}
1634
Ben Murdoch097c5b22016-05-18 11:27:45 +01001635void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1636 Space::AddAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001637 StartNextInlineAllocationStep();
1638}
1639
Ben Murdoch097c5b22016-05-18 11:27:45 +01001640void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1641 Space::RemoveAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001642 StartNextInlineAllocationStep();
1643}
1644
Ben Murdoch097c5b22016-05-18 11:27:45 +01001645void NewSpace::PauseAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001646 // Do a step to account for memory allocated so far.
1647 InlineAllocationStep(top(), top(), nullptr, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001648 Space::PauseAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001649 top_on_previous_step_ = 0;
1650 UpdateInlineAllocationLimit(0);
1651}
1652
Ben Murdoch097c5b22016-05-18 11:27:45 +01001653void NewSpace::ResumeAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001654 DCHECK(top_on_previous_step_ == 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001655 Space::ResumeAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001656 StartNextInlineAllocationStep();
1657}
1658
1659
1660void NewSpace::InlineAllocationStep(Address top, Address new_top,
1661 Address soon_object, size_t size) {
1662 if (top_on_previous_step_) {
1663 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001664 for (int i = 0; i < allocation_observers_->length(); ++i) {
1665 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1666 size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001667 }
1668 top_on_previous_step_ = new_top;
1669 }
1670}
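// Sketch of the observer side of InlineAllocationStep() (illustrative; the
// subclass name below is made up, AllocationObserver itself is declared
// elsewhere in the heap sources):
//
//   class SamplingObserver : public AllocationObserver {
//    public:
//     explicit SamplingObserver(intptr_t step_size)
//         : AllocationObserver(step_size) {}
//     void Step(int bytes_allocated, Address soon_object, size_t size) override {
//       // Called once roughly every |step_size| bytes of new-space
//       // allocation, with |soon_object| pointing at the object about to be
//       // allocated.
//     }
//   };
//
//   // new_space->AddAllocationObserver(&observer) registers it and lowers the
//   // inline allocation limit via StartNextInlineAllocationStep().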
1671
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001672#ifdef VERIFY_HEAP
1673// We do not use the SemiSpaceIterator because verification doesn't assume
1674// that it works (it depends on the invariants we are checking).
1675void NewSpace::Verify() {
1676 // The allocation pointer should be in the space or at the very end.
1677 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1678
1679 // There should be objects packed in from the low address up to the
1680 // allocation pointer.
1681 Address current = to_space_.first_page()->area_start();
1682 CHECK_EQ(current, to_space_.space_start());
1683
1684 while (current != top()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001685 if (!Page::IsAlignedToPageSize(current)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001686 // The allocation pointer should not be in the middle of an object.
Ben Murdochc5610432016-08-08 18:44:38 +01001687 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001688 current < top());
1689
1690 HeapObject* object = HeapObject::FromAddress(current);
1691
1692 // The first word should be a map, and we expect all map pointers to
1693 // be in map space.
1694 Map* map = object->map();
1695 CHECK(map->IsMap());
1696 CHECK(heap()->map_space()->Contains(map));
1697
1698 // The object should not be code or a map.
1699 CHECK(!object->IsMap());
Ben Murdochda12d292016-06-02 14:46:10 +01001700 CHECK(!object->IsAbstractCode());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001701
1702 // The object itself should look OK.
1703 object->ObjectVerify();
1704
1705 // All the interior pointers should be contained in the heap.
1706 VerifyPointersVisitor visitor;
1707 int size = object->Size();
1708 object->IterateBody(map->instance_type(), size, &visitor);
1709
1710 current += size;
1711 } else {
1712 // At end of page, switch to next page.
Ben Murdochc5610432016-08-08 18:44:38 +01001713 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001714 // Next page should be valid.
1715 CHECK(!page->is_anchor());
1716 current = page->area_start();
1717 }
1718 }
1719
1720 // Check semi-spaces.
1721 CHECK_EQ(from_space_.id(), kFromSpace);
1722 CHECK_EQ(to_space_.id(), kToSpace);
1723 from_space_.Verify();
1724 to_space_.Verify();
1725}
1726#endif
1727
1728// -----------------------------------------------------------------------------
1729// SemiSpace implementation
1730
Ben Murdochda12d292016-06-02 14:46:10 +01001731void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001732 DCHECK_GE(maximum_capacity, Page::kPageSize);
1733 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1734 current_capacity_ = minimum_capacity_;
1735 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001736 committed_ = false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001737}
1738
1739
1740void SemiSpace::TearDown() {
Ben Murdochda12d292016-06-02 14:46:10 +01001741 // Properly uncommit memory to keep the allocator counters in sync.
1742 if (is_committed()) Uncommit();
1743 current_capacity_ = maximum_capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001744}
1745
1746
1747bool SemiSpace::Commit() {
1748 DCHECK(!is_committed());
Ben Murdochc5610432016-08-08 18:44:38 +01001749 Page* current = anchor();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001750 const int num_pages = current_capacity_ / Page::kPageSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001751 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Ben Murdochc5610432016-08-08 18:44:38 +01001752 Page* new_page =
1753 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1754 Page::kAllocatableMemory, this, executable());
Ben Murdochda12d292016-06-02 14:46:10 +01001755 if (new_page == nullptr) {
1756 RewindPages(current, pages_added);
1757 return false;
1758 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001759 new_page->InsertAfter(current);
1760 current = new_page;
1761 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001762 Reset();
Ben Murdochda12d292016-06-02 14:46:10 +01001763 AccountCommitted(current_capacity_);
1764 if (age_mark_ == nullptr) {
1765 age_mark_ = first_page()->area_start();
1766 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001767 committed_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001768 return true;
1769}
1770
1771
1772bool SemiSpace::Uncommit() {
1773 DCHECK(is_committed());
Ben Murdochda12d292016-06-02 14:46:10 +01001774 NewSpacePageIterator it(this);
1775 while (it.has_next()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001776 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
Ben Murdochda12d292016-06-02 14:46:10 +01001777 it.next());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001778 }
1779 anchor()->set_next_page(anchor());
1780 anchor()->set_prev_page(anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001781 AccountUncommitted(current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001782 committed_ = false;
Ben Murdochc5610432016-08-08 18:44:38 +01001783 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001784 return true;
1785}
1786
1787
1788size_t SemiSpace::CommittedPhysicalMemory() {
1789 if (!is_committed()) return 0;
1790 size_t size = 0;
1791 NewSpacePageIterator it(this);
1792 while (it.has_next()) {
1793 size += it.next()->CommittedPhysicalMemory();
1794 }
1795 return size;
1796}
1797
1798
1799bool SemiSpace::GrowTo(int new_capacity) {
1800 if (!is_committed()) {
1801 if (!Commit()) return false;
1802 }
Ben Murdochc5610432016-08-08 18:44:38 +01001803 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001804 DCHECK_LE(new_capacity, maximum_capacity_);
1805 DCHECK_GT(new_capacity, current_capacity_);
Ben Murdochda12d292016-06-02 14:46:10 +01001806 const int delta = new_capacity - current_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001807 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochc5610432016-08-08 18:44:38 +01001808 const int delta_pages = delta / Page::kPageSize;
1809 Page* last_page = anchor()->prev_page();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001810 DCHECK_NE(last_page, anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001811 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Ben Murdochc5610432016-08-08 18:44:38 +01001812 Page* new_page =
1813 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1814 Page::kAllocatableMemory, this, executable());
Ben Murdochda12d292016-06-02 14:46:10 +01001815 if (new_page == nullptr) {
1816 RewindPages(last_page, pages_added);
1817 return false;
1818 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001819 new_page->InsertAfter(last_page);
1820 Bitmap::Clear(new_page);
 1821    // Duplicate the flags that were set on the old page.
Ben Murdochc5610432016-08-08 18:44:38 +01001822 new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001823 last_page = new_page;
1824 }
Ben Murdochda12d292016-06-02 14:46:10 +01001825 AccountCommitted(static_cast<intptr_t>(delta));
1826 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001827 return true;
1828}
1829
Ben Murdochc5610432016-08-08 18:44:38 +01001830void SemiSpace::RewindPages(Page* start, int num_pages) {
1831 Page* new_last_page = nullptr;
1832 Page* last_page = start;
Ben Murdochda12d292016-06-02 14:46:10 +01001833 while (num_pages > 0) {
1834 DCHECK_NE(last_page, anchor());
1835 new_last_page = last_page->prev_page();
1836 last_page->prev_page()->set_next_page(last_page->next_page());
1837 last_page->next_page()->set_prev_page(last_page->prev_page());
1838 last_page = new_last_page;
1839 num_pages--;
1840 }
1841}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001842
1843bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdochc5610432016-08-08 18:44:38 +01001844 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001845 DCHECK_GE(new_capacity, minimum_capacity_);
1846 DCHECK_LT(new_capacity, current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001847 if (is_committed()) {
Ben Murdochda12d292016-06-02 14:46:10 +01001848 const int delta = current_capacity_ - new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001849 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochc5610432016-08-08 18:44:38 +01001850 int delta_pages = delta / Page::kPageSize;
1851 Page* new_last_page;
1852 Page* last_page;
Ben Murdochda12d292016-06-02 14:46:10 +01001853 while (delta_pages > 0) {
1854 last_page = anchor()->prev_page();
1855 new_last_page = last_page->prev_page();
1856 new_last_page->set_next_page(anchor());
1857 anchor()->set_prev_page(new_last_page);
Ben Murdochc5610432016-08-08 18:44:38 +01001858 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
Ben Murdochda12d292016-06-02 14:46:10 +01001859 last_page);
1860 delta_pages--;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001861 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001862 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochc5610432016-08-08 18:44:38 +01001863 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001864 }
Ben Murdochda12d292016-06-02 14:46:10 +01001865 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001866 return true;
1867}
1868
Ben Murdoch097c5b22016-05-18 11:27:45 +01001869void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001870 anchor_.set_owner(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001871 anchor_.prev_page()->set_next_page(&anchor_);
1872 anchor_.next_page()->set_prev_page(&anchor_);
1873
Ben Murdoch097c5b22016-05-18 11:27:45 +01001874 NewSpacePageIterator it(this);
1875 while (it.has_next()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001876 Page* page = it.next();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001877 page->set_owner(this);
1878 page->SetFlags(flags, mask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001879 if (id_ == kToSpace) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001880 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1881 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1882 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1883 page->ResetLiveBytes();
1884 } else {
1885 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1886 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1887 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001888 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1889 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001890 }
1891}
1892
1893
1894void SemiSpace::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001895 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001896 current_page_ = anchor_.next_page();
1897}
1898
Ben Murdochc5610432016-08-08 18:44:38 +01001899bool SemiSpace::ReplaceWithEmptyPage(Page* old_page) {
1900 // TODO(mlippautz): We do not have to get a new page here when the semispace
1901 // is uncommitted later on.
1902 Page* new_page = heap()->memory_allocator()->AllocatePage(
1903 Page::kAllocatableMemory, this, executable());
1904 if (new_page == nullptr) return false;
1905 Bitmap::Clear(new_page);
1906 new_page->SetFlags(old_page->GetFlags(), Page::kCopyAllFlags);
1907 new_page->set_next_page(old_page->next_page());
1908 new_page->set_prev_page(old_page->prev_page());
1909 old_page->next_page()->set_prev_page(new_page);
1910 old_page->prev_page()->set_next_page(new_page);
1911 heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
1912 ClearRecordedSlots::kNo);
1913 return true;
1914}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001915
1916void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1917 // We won't be swapping semispaces without data in them.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001918 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1919 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001920
Ben Murdoch097c5b22016-05-18 11:27:45 +01001921 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001922
Ben Murdoch097c5b22016-05-18 11:27:45 +01001923 // We swap all properties but id_.
1924 std::swap(from->current_capacity_, to->current_capacity_);
1925 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1926 std::swap(from->minimum_capacity_, to->minimum_capacity_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001927 std::swap(from->age_mark_, to->age_mark_);
1928 std::swap(from->committed_, to->committed_);
1929 std::swap(from->anchor_, to->anchor_);
1930 std::swap(from->current_page_, to->current_page_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001931
Ben Murdochc5610432016-08-08 18:44:38 +01001932 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001933 from->FixPagesFlags(0, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001934}
1935
1936
1937void SemiSpace::set_age_mark(Address mark) {
Ben Murdochc5610432016-08-08 18:44:38 +01001938 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001939 age_mark_ = mark;
1940 // Mark all pages up to the one containing mark.
1941 NewSpacePageIterator it(space_start(), mark);
1942 while (it.has_next()) {
1943 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1944 }
1945}
1946
1947
1948#ifdef DEBUG
1949void SemiSpace::Print() {}
1950#endif
1951
1952#ifdef VERIFY_HEAP
1953void SemiSpace::Verify() {
1954 bool is_from_space = (id_ == kFromSpace);
Ben Murdochc5610432016-08-08 18:44:38 +01001955 Page* page = anchor_.next_page();
1956 CHECK(anchor_.owner() == this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001957 while (page != &anchor_) {
Ben Murdochc5610432016-08-08 18:44:38 +01001958 CHECK_EQ(page->owner(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001959 CHECK(page->InNewSpace());
1960 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1961 : MemoryChunk::IN_TO_SPACE));
1962 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1963 : MemoryChunk::IN_FROM_SPACE));
1964 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1965 if (!is_from_space) {
1966 // The pointers-from-here-are-interesting flag isn't updated dynamically
1967 // on from-space pages, so it might be out of sync with the marking state.
1968 if (page->heap()->incremental_marking()->IsMarking()) {
1969 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1970 } else {
1971 CHECK(
1972 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1973 }
1974 // TODO(gc): Check that the live_bytes_count_ field matches the
1975 // black marking on the page (if we make it match in new-space).
1976 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001977 CHECK_EQ(page->prev_page()->next_page(), page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001978 page = page->next_page();
1979 }
1980}
1981#endif
1982
1983#ifdef DEBUG
1984void SemiSpace::AssertValidRange(Address start, Address end) {
1985 // Addresses belong to same semi-space
Ben Murdochc5610432016-08-08 18:44:38 +01001986 Page* page = Page::FromAllocationAreaAddress(start);
1987 Page* end_page = Page::FromAllocationAreaAddress(end);
1988 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
1989 CHECK_EQ(space, end_page->owner());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001990 // Start address is before end address, either on same page,
1991 // or end address is on a later page in the linked list of
1992 // semi-space pages.
1993 if (page == end_page) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001994 CHECK_LE(start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001995 } else {
1996 while (page != end_page) {
1997 page = page->next_page();
1998 CHECK_NE(page, space->anchor());
1999 }
2000 }
2001}
2002#endif
2003
2004
2005// -----------------------------------------------------------------------------
2006// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002007
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002008SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002009 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002010}
2011
2012
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002013void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002014 SemiSpace::AssertValidRange(start, end);
2015 current_ = start;
2016 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002017}
2018
2019
2020#ifdef DEBUG
 2021// heap_histograms is shared; always clear it before using it.
2022static void ClearHistograms(Isolate* isolate) {
2023// We reset the name each time, though it hasn't changed.
2024#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2025 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2026#undef DEF_TYPE_NAME
2027
2028#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2029 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2030#undef CLEAR_HISTOGRAM
2031
2032 isolate->js_spill_information()->Clear();
2033}
2034
2035
2036static void ClearCodeKindStatistics(int* code_kind_statistics) {
2037 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
2038 code_kind_statistics[i] = 0;
2039 }
2040}
2041
2042
2043static void ReportCodeKindStatistics(int* code_kind_statistics) {
2044 PrintF("\n Code kind histograms: \n");
2045 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
2046 if (code_kind_statistics[i] > 0) {
2047 PrintF(" %-20s: %10d bytes\n",
2048 Code::Kind2String(static_cast<Code::Kind>(i)),
2049 code_kind_statistics[i]);
2050 }
2051 }
2052 PrintF("\n");
2053}
2054
2055
2056static int CollectHistogramInfo(HeapObject* obj) {
2057 Isolate* isolate = obj->GetIsolate();
2058 InstanceType type = obj->map()->instance_type();
2059 DCHECK(0 <= type && type <= LAST_TYPE);
2060 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2061 isolate->heap_histograms()[type].increment_number(1);
2062 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2063
2064 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2065 JSObject::cast(obj)
2066 ->IncrementSpillStatistics(isolate->js_spill_information());
2067 }
2068
2069 return obj->Size();
2070}
2071
2072
2073static void ReportHistogram(Isolate* isolate, bool print_spill) {
2074 PrintF("\n Object Histogram:\n");
2075 for (int i = 0; i <= LAST_TYPE; i++) {
2076 if (isolate->heap_histograms()[i].number() > 0) {
2077 PrintF(" %-34s%10d (%10d bytes)\n",
2078 isolate->heap_histograms()[i].name(),
2079 isolate->heap_histograms()[i].number(),
2080 isolate->heap_histograms()[i].bytes());
2081 }
2082 }
2083 PrintF("\n");
2084
2085 // Summarize string types.
2086 int string_number = 0;
2087 int string_bytes = 0;
2088#define INCREMENT(type, size, name, camel_name) \
2089 string_number += isolate->heap_histograms()[type].number(); \
2090 string_bytes += isolate->heap_histograms()[type].bytes();
2091 STRING_TYPE_LIST(INCREMENT)
2092#undef INCREMENT
2093 if (string_number > 0) {
2094 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2095 string_bytes);
2096 }
2097
2098 if (FLAG_collect_heap_spill_statistics && print_spill) {
2099 isolate->js_spill_information()->Print();
2100 }
2101}
2102#endif // DEBUG
2103
2104
2105// Support for statistics gathering for --heap-stats and --log-gc.
2106void NewSpace::ClearHistograms() {
2107 for (int i = 0; i <= LAST_TYPE; i++) {
2108 allocated_histogram_[i].clear();
2109 promoted_histogram_[i].clear();
2110 }
2111}
2112
2113
2114// Because the copying collector does not touch garbage objects, we iterate
2115// the new space before a collection to get a histogram of allocated objects.
 2116// This only happens when the --log-gc flag is set.
2117void NewSpace::CollectStatistics() {
2118 ClearHistograms();
2119 SemiSpaceIterator it(this);
2120 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2121 RecordAllocation(obj);
2122}
2123
2124
2125static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2126 const char* description) {
2127 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2128 // Lump all the string types together.
2129 int string_number = 0;
2130 int string_bytes = 0;
2131#define INCREMENT(type, size, name, camel_name) \
2132 string_number += info[type].number(); \
2133 string_bytes += info[type].bytes();
2134 STRING_TYPE_LIST(INCREMENT)
2135#undef INCREMENT
2136 if (string_number > 0) {
2137 LOG(isolate,
2138 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2139 }
2140
2141 // Then do the other types.
2142 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2143 if (info[i].number() > 0) {
2144 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2145 info[i].bytes()));
2146 }
2147 }
2148 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2149}
2150
2151
2152void NewSpace::ReportStatistics() {
2153#ifdef DEBUG
2154 if (FLAG_heap_stats) {
2155 float pct = static_cast<float>(Available()) / TotalCapacity();
Ben Murdochc5610432016-08-08 18:44:38 +01002156 PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002157 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2158 PrintF("\n Object Histogram:\n");
2159 for (int i = 0; i <= LAST_TYPE; i++) {
2160 if (allocated_histogram_[i].number() > 0) {
2161 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2162 allocated_histogram_[i].number(),
2163 allocated_histogram_[i].bytes());
2164 }
2165 }
2166 PrintF("\n");
2167 }
2168#endif // DEBUG
2169
2170 if (FLAG_log_gc) {
2171 Isolate* isolate = heap()->isolate();
2172 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2173 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2174 }
2175}
2176
2177
2178void NewSpace::RecordAllocation(HeapObject* obj) {
2179 InstanceType type = obj->map()->instance_type();
2180 DCHECK(0 <= type && type <= LAST_TYPE);
2181 allocated_histogram_[type].increment_number(1);
2182 allocated_histogram_[type].increment_bytes(obj->Size());
2183}
2184
2185
2186void NewSpace::RecordPromotion(HeapObject* obj) {
2187 InstanceType type = obj->map()->instance_type();
2188 DCHECK(0 <= type && type <= LAST_TYPE);
2189 promoted_histogram_[type].increment_number(1);
2190 promoted_histogram_[type].increment_bytes(obj->Size());
2191}
2192
2193
2194size_t NewSpace::CommittedPhysicalMemory() {
2195 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2196 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2197 size_t size = to_space_.CommittedPhysicalMemory();
2198 if (from_space_.is_committed()) {
2199 size += from_space_.CommittedPhysicalMemory();
2200 }
2201 return size;
2202}
2203
2204
2205// -----------------------------------------------------------------------------
2206// Free lists for old object spaces implementation
2207
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002208
2209void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002210 set_top(nullptr);
Ben Murdochda12d292016-06-02 14:46:10 +01002211 set_prev(nullptr);
2212 set_next(nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002213 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002214}
2215
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002217 DCHECK(page()->CanAllocate());
2218
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002219 FreeSpace* node = top();
2220 if (node == nullptr) return nullptr;
Ben Murdochda12d292016-06-02 14:46:10 +01002221 set_top(node->next());
2222 *node_size = node->Size();
2223 available_ -= *node_size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002224 return node;
2225}
2226
Ben Murdochda12d292016-06-02 14:46:10 +01002227FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
2228 int* node_size) {
2229 DCHECK(page()->CanAllocate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002230
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002231 FreeSpace* node = PickNodeFromList(node_size);
Ben Murdochda12d292016-06-02 14:46:10 +01002232 if ((node != nullptr) && (*node_size < minimum_size)) {
2233 Free(node, *node_size, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002234 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002235 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002236 }
2237 return node;
2238}
2239
Ben Murdochda12d292016-06-02 14:46:10 +01002240FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002241 int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002242 DCHECK(page()->CanAllocate());
2243
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002244 FreeSpace* prev_non_evac_node = nullptr;
2245 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2246 cur_node = cur_node->next()) {
2247 int size = cur_node->size();
Ben Murdochda12d292016-06-02 14:46:10 +01002248 if (size >= minimum_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002249 available_ -= size;
2250 if (cur_node == top()) {
2251 set_top(cur_node->next());
2252 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002253 if (prev_non_evac_node != nullptr) {
2254 prev_non_evac_node->set_next(cur_node->next());
2255 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002256 *node_size = size;
2257 return cur_node;
2258 }
2259
2260 prev_non_evac_node = cur_node;
2261 }
2262 return nullptr;
2263}
2264
Ben Murdochda12d292016-06-02 14:46:10 +01002265bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
2266 FreeMode mode) {
2267 if (!page()->CanAllocate()) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002268
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002269 free_space->set_next(top());
2270 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002271 available_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01002272 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2273 owner()->AddCategory(this);
2274 }
2275 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002276}
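// Note: categories start out unlinked from their owning free list. The
// (prev() == nullptr && next() == nullptr) check above re-registers a category
// lazily on its first successful Free() with kLinkCategory, so categories that
// never receive memory do not show up in the owner's per-type lists.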
2277
2278
2279void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002280 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002281 while (n != NULL) {
2282 Map** map_location = reinterpret_cast<Map**>(n->address());
2283 if (*map_location == NULL) {
2284 *map_location = heap->free_space_map();
2285 } else {
2286 DCHECK(*map_location == heap->free_space_map());
2287 }
2288 n = n->next();
2289 }
2290}
2291
Ben Murdochda12d292016-06-02 14:46:10 +01002292void FreeListCategory::Relink() {
2293 DCHECK(!is_linked());
2294 owner()->AddCategory(this);
2295}
2296
2297void FreeListCategory::Invalidate() {
2298 page()->add_available_in_free_list(-available());
2299 Reset();
2300 type_ = kInvalidCategory;
2301}
2302
Ben Murdoch097c5b22016-05-18 11:27:45 +01002303FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2304 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002305 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002306 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002307 Reset();
2308}
2309
2310
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002311void FreeList::Reset() {
Ben Murdochda12d292016-06-02 14:46:10 +01002312 ForAllFreeListCategories(
2313 [](FreeListCategory* category) { category->Reset(); });
Ben Murdoch097c5b22016-05-18 11:27:45 +01002314 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002315 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002316 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002317 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002318}
2319
Ben Murdochda12d292016-06-02 14:46:10 +01002320int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002321 if (size_in_bytes == 0) return 0;
2322
Ben Murdochda12d292016-06-02 14:46:10 +01002323 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
2324 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002325
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002326 Page* page = Page::FromAddress(start);
2327
Ben Murdochda12d292016-06-02 14:46:10 +01002328 // Blocks have to be a minimum size to hold free list items.
2329 if (size_in_bytes < kMinBlockSize) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002330 page->add_wasted_memory(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002331 wasted_bytes_.Increment(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002332 return size_in_bytes;
2333 }
2334
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002335 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336 // Insert other blocks at the head of a free list of the appropriate
2337 // magnitude.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002338 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002339 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2340 page->add_available_in_free_list(size_in_bytes);
2341 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002342 return 0;
2343}
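// Summary (illustrative): a block smaller than kMinBlockSize cannot hold a
// free-list node, so it is only accounted as wasted memory and its size is
// returned to the caller; anything larger becomes a FreeSpace node in the
// page-local category chosen by SelectFreeListCategoryType() and shows up in
// the page's available-in-free-list counter.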
2344
Ben Murdochda12d292016-06-02 14:46:10 +01002345FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
2346 FreeListCategoryIterator it(this, type);
2347 FreeSpace* node = nullptr;
2348 while (it.HasNext()) {
2349 FreeListCategory* current = it.Next();
2350 node = current->PickNodeFromList(node_size);
2351 if (node != nullptr) {
2352 Page::FromAddress(node->address())
2353 ->add_available_in_free_list(-(*node_size));
2354 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2355 return node;
2356 }
2357 RemoveCategory(current);
2358 }
2359 return node;
2360}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002361
Ben Murdochda12d292016-06-02 14:46:10 +01002362FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
2363 int minimum_size) {
2364 if (categories_[type] == nullptr) return nullptr;
2365 FreeSpace* node =
2366 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002367 if (node != nullptr) {
2368 Page::FromAddress(node->address())
Ben Murdoch097c5b22016-05-18 11:27:45 +01002369 ->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002370 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2371 }
2372 return node;
2373}
2374
Ben Murdochda12d292016-06-02 14:46:10 +01002375FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2376 int* node_size, int minimum_size) {
2377 FreeListCategoryIterator it(this, type);
2378 FreeSpace* node = nullptr;
2379 while (it.HasNext()) {
2380 FreeListCategory* current = it.Next();
2381 node = current->SearchForNodeInList(minimum_size, node_size);
2382 if (node != nullptr) {
2383 Page::FromAddress(node->address())
2384 ->add_available_in_free_list(-(*node_size));
2385 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2386 return node;
2387 }
2388 }
2389 return node;
2390}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002391
2392FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2393 FreeSpace* node = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002394
Ben Murdoch097c5b22016-05-18 11:27:45 +01002395 // First try the allocation fast path: try to allocate the minimum element
2396 // size of a free list category. This operation is constant time.
2397 FreeListCategoryType type =
2398 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2399 for (int i = type; i < kHuge; i++) {
2400 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002401 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002402 }
2403
Ben Murdoch097c5b22016-05-18 11:27:45 +01002404 // Next search the huge list for free list nodes. This takes linear time in
2405 // the number of huge elements.
Ben Murdochda12d292016-06-02 14:46:10 +01002406 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002407 if (node != nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002408 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002409 return node;
2410 }
2411
Ben Murdoch097c5b22016-05-18 11:27:45 +01002412 // We need a huge block of memory, but we didn't find anything in the huge
2413 // list.
2414 if (type == kHuge) return nullptr;
2415
2416 // Now search the best fitting free list for a node that has at least the
Ben Murdochda12d292016-06-02 14:46:10 +01002417 // requested size.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002418 type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002419 node = TryFindNodeIn(type, node_size, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002420
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002421 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2422 return node;
2423}
2424
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002425// Allocation on the old space free list. If it succeeds then a new linear
2426// allocation space has been set up with the top and limit of the space. If
2427// the allocation fails then NULL is returned, and the caller can perform a GC
2428// or allocate a new page before retrying.
2429HeapObject* FreeList::Allocate(int size_in_bytes) {
2430 DCHECK(0 < size_in_bytes);
2431 DCHECK(size_in_bytes <= kMaxBlockSize);
2432 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2433 // Don't free list allocate if there is linear space available.
2434 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2435
2436 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2437 // Mark the old linear allocation area with a free space map so it can be
2438 // skipped when scanning the heap. This also puts it back in the free list
2439 // if it is big enough.
2440 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002441 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002442
2443 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2444 old_linear_size);
2445
2446 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002447 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2448 if (new_node == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002449
2450 int bytes_left = new_node_size - size_in_bytes;
2451 DCHECK(bytes_left >= 0);
2452
2453#ifdef DEBUG
2454 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2455 reinterpret_cast<Object**>(new_node->address())[i] =
2456 Smi::FromInt(kCodeZapValue);
2457 }
2458#endif
2459
2460 // The old-space-step might have finished sweeping and restarted marking.
2461 // Verify that it did not turn the page of the new node into an evacuation
2462 // candidate.
2463 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2464
2465 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2466
2467 // Memory in the linear allocation area is counted as allocated. We may free
2468 // a little of this again immediately - see below.
2469 owner_->Allocate(new_node_size);
2470
2471 if (owner_->heap()->inline_allocation_disabled()) {
2472 // Keep the linear allocation area empty if requested to do so, just
2473 // return area back to the free list instead.
2474 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
Ben Murdochc5610432016-08-08 18:44:38 +01002475 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2476 new_node->address() + size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002477 } else if (bytes_left > kThreshold &&
2478 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002479 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002480 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2481 // We don't want to give too large linear areas to the allocator while
2482 // incremental marking is going on, because we won't check again whether
2483 // we want to do another increment until the linear area is used up.
2484 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2485 new_node_size - size_in_bytes - linear_size);
2486 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2487 new_node->address() + size_in_bytes + linear_size);
Ben Murdochc5610432016-08-08 18:44:38 +01002488 } else {
2489 DCHECK(bytes_left >= 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002490 // Normally we give the rest of the node to the allocator as its new
2491 // linear allocation area.
2492 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2493 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002494 }
2495
Ben Murdochc5610432016-08-08 18:44:38 +01002496 owner_->AllocationStep(new_node->address(), size_in_bytes);
2497
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002498 return new_node;
2499}
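// Worked example for Allocate() (illustrative numbers): a 64-byte request
// served from a 256-byte free-list node leaves bytes_left == 192. With inline
// allocation enabled and incremental marking idle, the owner's new linear
// allocation area becomes [node + 64, node + 256); with inline allocation
// disabled, the 192-byte tail is freed again immediately and the linear area
// stays empty.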
2500
Ben Murdochda12d292016-06-02 14:46:10 +01002501intptr_t FreeList::EvictFreeListItems(Page* page) {
2502 intptr_t sum = 0;
2503 page->ForAllFreeListCategories(
2504 [this, &sum, page](FreeListCategory* category) {
2505 DCHECK_EQ(this, category->owner());
2506 sum += category->available();
2507 RemoveCategory(category);
2508 category->Invalidate();
2509 });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002510 return sum;
2511}
2512
Ben Murdochda12d292016-06-02 14:46:10 +01002513bool FreeList::ContainsPageFreeListItems(Page* page) {
2514 bool contained = false;
2515 page->ForAllFreeListCategories(
2516 [this, &contained](FreeListCategory* category) {
2517 if (category->owner() == this && category->is_linked()) {
2518 contained = true;
2519 }
2520 });
2521 return contained;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002522}
2523
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002524void FreeList::RepairLists(Heap* heap) {
Ben Murdochda12d292016-06-02 14:46:10 +01002525 ForAllFreeListCategories(
2526 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2527}
2528
2529bool FreeList::AddCategory(FreeListCategory* category) {
2530 FreeListCategoryType type = category->type_;
2531 FreeListCategory* top = categories_[type];
2532
2533 if (category->is_empty()) return false;
2534 if (top == category) return false;
2535
2536 // Common double-linked list insertion.
2537 if (top != nullptr) {
2538 top->set_prev(category);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002539 }
Ben Murdochda12d292016-06-02 14:46:10 +01002540 category->set_next(top);
2541 categories_[type] = category;
2542 return true;
2543}
2544
2545void FreeList::RemoveCategory(FreeListCategory* category) {
2546 FreeListCategoryType type = category->type_;
2547 FreeListCategory* top = categories_[type];
2548
2549 // Common double-linked list removal.
2550 if (top == category) {
2551 categories_[type] = category->next();
2552 }
2553 if (category->prev() != nullptr) {
2554 category->prev()->set_next(category->next());
2555 }
2556 if (category->next() != nullptr) {
2557 category->next()->set_prev(category->prev());
2558 }
2559 category->set_next(nullptr);
2560 category->set_prev(nullptr);
2561}
2562
2563void FreeList::PrintCategories(FreeListCategoryType type) {
2564 FreeListCategoryIterator it(this, type);
2565 PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
2566 while (it.HasNext()) {
2567 FreeListCategory* current = it.Next();
2568 PrintF("%p -> ", current);
2569 }
2570 PrintF("null\n");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002571}
2572
2573
2574#ifdef DEBUG
2575intptr_t FreeListCategory::SumFreeList() {
2576 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002577 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002578 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002579 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2580 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002581 cur = cur->next();
2582 }
2583 return sum;
2584}
2585
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002586int FreeListCategory::FreeListLength() {
2587 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002588 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002589 while (cur != NULL) {
2590 length++;
2591 cur = cur->next();
2592 if (length == kVeryLongFreeList) return length;
2593 }
2594 return length;
2595}
2596
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002597bool FreeList::IsVeryLong() {
Ben Murdochda12d292016-06-02 14:46:10 +01002598 int len = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002599 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002600 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2601 while (it.HasNext()) {
2602 len += it.Next()->FreeListLength();
2603 if (len >= FreeListCategory::kVeryLongFreeList) return true;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002604 }
2605 }
2606 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002607}
2608
2609
2610// This can take a very long time because it is linear in the number of entries
2611// on the free list, so it should not be called if FreeListLength returns
2612// kVeryLongFreeList.
2613intptr_t FreeList::SumFreeLists() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002614 intptr_t sum = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01002615 ForAllFreeListCategories(
2616 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002617 return sum;
2618}
2619#endif
2620
2621
2622// -----------------------------------------------------------------------------
2623// OldSpace implementation
2624
2625void PagedSpace::PrepareForMarkCompact() {
2626 // We don't have a linear allocation area while sweeping. It will be restored
2627 // on the first allocation after the sweep.
2628 EmptyAllocationInfo();
2629
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002630 // Clear the free list before a full GC---it will be rebuilt afterward.
2631 free_list_.Reset();
2632}
2633
2634
2635intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002636 const intptr_t size = Size() - (limit() - top());
2637 CHECK_GE(limit(), top());
2638 CHECK_GE(size, 0);
2639 USE(size);
2640 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002641}
2642
2643
2644// After we have booted, we have created a map which represents free space
2645// on the heap. If there was already a free list then the elements on it
2646// were created with the wrong FreeSpaceMap (normally NULL), so we need to
2647// fix them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002648void PagedSpace::RepairFreeListsAfterDeserialization() {
2649 free_list_.RepairLists(heap());
2650 // Each page may have a small free space that is not tracked by a free list.
2651 // Update the maps for those free space objects.
2652 PageIterator iterator(this);
2653 while (iterator.has_next()) {
2654 Page* page = iterator.next();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002655 int size = static_cast<int>(page->wasted_memory());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002656 if (size == 0) continue;
2657 Address address = page->OffsetToAddress(Page::kPageSize - size);
Ben Murdochda12d292016-06-02 14:46:10 +01002658 heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002659 }
2660}
2661
2662
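// If the linear allocation area sits on a page that can no longer be allocated
// on (e.g. an evacuation candidate), seal the remaining gap with a filler and
// drop the area.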
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002663void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2664 if (allocation_info_.top() >= allocation_info_.limit()) return;
2665
Ben Murdochc5610432016-08-08 18:44:38 +01002666 if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002667 // Create filler object to keep page iterable if it was iterable.
2668 int remaining =
2669 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
Ben Murdochda12d292016-06-02 14:46:10 +01002670 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
2671 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002672 allocation_info_.Reset(nullptr, nullptr);
2673 }
2674}
2675
2676
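// Blocks until concurrent sweeping (if any) has completed and then retries a
// free-list allocation; returns nullptr if sweeping was not in progress.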
2677HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002678 MarkCompactCollector* collector = heap()->mark_compact_collector();
2679 if (collector->sweeping_in_progress()) {
2680 // Wait for the sweeper threads here and complete the sweeping phase.
2681 collector->EnsureSweepingCompleted();
2682
2683 // After waiting for the sweeper threads, there may be new free-list
2684 // entries.
2685 return free_list_.Allocate(size_in_bytes);
2686 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002687 return nullptr;
2688}
2689
2690
2691HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2692 MarkCompactCollector* collector = heap()->mark_compact_collector();
2693 if (collector->sweeping_in_progress()) {
2694 collector->SweepAndRefill(this);
2695 return free_list_.Allocate(size_in_bytes);
2696 }
2697 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002698}
2699
2700
2701HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002702 const int kMaxPagesToSweep = 1;
2703
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002704 // Allocation in this space has failed.
2705
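// Rough retry order: (1) refill the free list from concurrent sweeping and
// retry, (2) sweep up to kMaxPagesToSweep pages on the main thread and retry,
// (3) if the old-generation limit has been reached (and always_allocate is
// off), finish sweeping and retry instead of expanding, (4) otherwise expand
// the space, (5) as a last resort finish sweeping and retry once more.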
2706 MarkCompactCollector* collector = heap()->mark_compact_collector();
2707 // Sweeping is still in progress.
2708 if (collector->sweeping_in_progress()) {
2709 // First try to refill the free-list, concurrent sweeper threads
2710 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002711 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002712
2713 // Retry the free list allocation.
2714 HeapObject* object = free_list_.Allocate(size_in_bytes);
2715 if (object != NULL) return object;
2716
2717 // If sweeping is still in progress try to sweep pages on the main thread.
Ben Murdochc5610432016-08-08 18:44:38 +01002718 int max_freed = collector->sweeper().ParallelSweepSpace(
2719 identity(), size_in_bytes, kMaxPagesToSweep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002720 RefillFreeList();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002721 if (max_freed >= size_in_bytes) {
2722 object = free_list_.Allocate(size_in_bytes);
2723 if (object != nullptr) return object;
2724 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002725 }
2726
2727 // Free list allocation failed and there is no next page. Fail if we have
2728 // hit the old generation size limit that should cause a garbage
2729 // collection.
2730 if (!heap()->always_allocate() &&
2731 heap()->OldGenerationAllocationLimitReached()) {
2732 // If sweeper threads are active, wait for them at that point and steal
2733 // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002734 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2735 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002736 }
2737
2738 // Try to expand the space and allocate in the new next page.
2739 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002740 DCHECK((CountTotalPages() > 1) ||
2741 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002742 return free_list_.Allocate(size_in_bytes);
2743 }
2744
2745 // If sweeper threads are active, wait for them at that point and steal
2746 // elements from their free-lists. Allocation may still fail, which would
2747 // indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002748 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002749}
2750
2751
2752#ifdef DEBUG
2753void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2754 CommentStatistic* comments_statistics =
2755 isolate->paged_space_comments_statistics();
2756 ReportCodeKindStatistics(isolate->code_kind_statistics());
2757 PrintF(
2758 "Code comment statistics (\" [ comment-txt : size/ "
2759 "count (average)\"):\n");
2760 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2761 const CommentStatistic& cs = comments_statistics[i];
2762 if (cs.size > 0) {
2763 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2764 cs.size / cs.count);
2765 }
2766 }
2767 PrintF("\n");
2768}
2769
2770
2771void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2772 CommentStatistic* comments_statistics =
2773 isolate->paged_space_comments_statistics();
2774 ClearCodeKindStatistics(isolate->code_kind_statistics());
2775 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2776 comments_statistics[i].Clear();
2777 }
2778 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2779 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2780 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2781}
2782
2783
2784 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2785 // long as 'kMaxComments' is small.
2786static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2787 CommentStatistic* comments_statistics =
2788 isolate->paged_space_comments_statistics();
2789 // Do not count empty comments
2790 if (delta <= 0) return;
2791 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2792 // Search for a free or matching entry in 'comments_statistics': 'cs'
2793 // points to result.
2794 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2795 if (comments_statistics[i].comment == NULL) {
2796 cs = &comments_statistics[i];
2797 cs->comment = comment;
2798 break;
2799 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2800 cs = &comments_statistics[i];
2801 break;
2802 }
2803 }
2804 // Update entry for 'comment'
2805 cs->size += delta;
2806 cs->count += 1;
2807}
2808
2809
2810// Call for each nested comment start (start marked with '[ xxx', end marked
2811 // with ']'). RelocIterator 'it' must point to a comment reloc info.
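// Illustrative example: for the comment sequence "[ Outer", "[ Inner", "]",
// "]", the bytes between "[ Inner" and its "]" are attributed to "[ Inner",
// while the remaining bytes of the outer range are attributed to "[ Outer".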
2812static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2813 DCHECK(!it->done());
2814 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2815 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2816 if (tmp[0] != '[') {
2817 // Not a nested comment; skip
2818 return;
2819 }
2820
2821 // Search for end of nested comment or a new nested comment
2822 const char* const comment_txt =
2823 reinterpret_cast<const char*>(it->rinfo()->data());
2824 const byte* prev_pc = it->rinfo()->pc();
2825 int flat_delta = 0;
2826 it->next();
2827 while (true) {
2828 // All nested comments must be terminated properly, so the loop is
2829 // guaranteed to exit before the iterator is done.
2830 DCHECK(!it->done());
2831 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2832 const char* const txt =
2833 reinterpret_cast<const char*>(it->rinfo()->data());
2834 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2835 if (txt[0] == ']') break; // End of nested comment
2836 // A new comment
2837 CollectCommentStatistics(isolate, it);
2838 // Skip code that was covered with previous comment
2839 prev_pc = it->rinfo()->pc();
2840 }
2841 it->next();
2842 }
2843 EnterComment(isolate, comment_txt, flat_delta);
2844}
2845
2846
2847// Collects code size statistics:
2848// - by code kind
2849// - by code comment
2850void PagedSpace::CollectCodeStatistics() {
2851 Isolate* isolate = heap()->isolate();
2852 HeapObjectIterator obj_it(this);
2853 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Ben Murdochda12d292016-06-02 14:46:10 +01002854 if (obj->IsAbstractCode()) {
2855 AbstractCode* code = AbstractCode::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002856 isolate->code_kind_statistics()[code->kind()] += code->Size();
Ben Murdochda12d292016-06-02 14:46:10 +01002857 }
2858 if (obj->IsCode()) {
2859 // TODO(mythria): Also enable this for BytecodeArray when it supports
2860 // RelocInformation.
2861 Code* code = Code::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862 RelocIterator it(code);
2863 int delta = 0;
2864 const byte* prev_pc = code->instruction_start();
2865 while (!it.done()) {
2866 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2867 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2868 CollectCommentStatistics(isolate, &it);
2869 prev_pc = it.rinfo()->pc();
2870 }
2871 it.next();
2872 }
2873
2874 DCHECK(code->instruction_start() <= prev_pc &&
2875 prev_pc <= code->instruction_end());
2876 delta += static_cast<int>(code->instruction_end() - prev_pc);
2877 EnterComment(isolate, "NoComment", delta);
2878 }
2879 }
2880}
2881
2882
2883void PagedSpace::ReportStatistics() {
2884 int pct = static_cast<int>(Available() * 100 / Capacity());
Ben Murdochc5610432016-08-08 18:44:38 +01002885 PrintF(" capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
2886 ", available: %" V8PRIdPTR ", %%%d\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002887 Capacity(), Waste(), Available(), pct);
2888
2889 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2890 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2891 }
2892 ClearHistograms(heap()->isolate());
2893 HeapObjectIterator obj_it(this);
2894 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2895 CollectHistogramInfo(obj);
2896 ReportHistogram(heap()->isolate(), true);
2897}
2898#endif
2899
2900
2901// -----------------------------------------------------------------------------
2902// MapSpace implementation
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002903
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002904#ifdef VERIFY_HEAP
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002905void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002906#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002907
2908
2909// -----------------------------------------------------------------------------
2910// LargeObjectIterator
2911
2912LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2913 current_ = space->first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002914}
2915
2916
2917HeapObject* LargeObjectIterator::Next() {
2918 if (current_ == NULL) return NULL;
2919
2920 HeapObject* object = current_->GetObject();
2921 current_ = current_->next_page();
2922 return object;
2923}
2924
2925
2926// -----------------------------------------------------------------------------
2927// LargeObjectSpace
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002928
2929
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002930LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002931 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002932 first_page_(NULL),
2933 size_(0),
2934 page_count_(0),
2935 objects_size_(0),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002936 chunk_map_(HashMap::PointersMatch, 1024) {}
2937
2938
2939LargeObjectSpace::~LargeObjectSpace() {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002940
2941
2942bool LargeObjectSpace::SetUp() {
2943 first_page_ = NULL;
2944 size_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002945 page_count_ = 0;
2946 objects_size_ = 0;
2947 chunk_map_.Clear();
2948 return true;
2949}
2950
2951
2952void LargeObjectSpace::TearDown() {
2953 while (first_page_ != NULL) {
2954 LargePage* page = first_page_;
2955 first_page_ = first_page_->next_page();
2956 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2957
2958 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
Ben Murdochc5610432016-08-08 18:44:38 +01002959 heap()->memory_allocator()->PerformAllocationCallback(
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002960 space, kAllocationActionFree, page->size());
Ben Murdochc5610432016-08-08 18:44:38 +01002961 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002962 }
2963 SetUp();
2964}
2965
2966
2967AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2968 Executability executable) {
2969 // Check if we want to force a GC before growing the old space further.
2970 // If so, fail the allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002971 if (!heap()->CanExpandOldGeneration(object_size)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002972 return AllocationResult::Retry(identity());
2973 }
2974
Ben Murdochc5610432016-08-08 18:44:38 +01002975 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002976 object_size, this, executable);
2977 if (page == NULL) return AllocationResult::Retry(identity());
2978 DCHECK(page->area_size() >= object_size);
2979
2980 size_ += static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002981 AccountCommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002982 objects_size_ += object_size;
2983 page_count_++;
2984 page->set_next_page(first_page_);
2985 first_page_ = page;
2986
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002987 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2988 // this large page in the chunk map.
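// Keys are addresses divided by MemoryChunk::kAlignment; FindPage() below
// uses the same computation, so a lookup with any interior address of this
// page resolves to one of the keys registered here.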
2989 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2990 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2991 for (uintptr_t key = base; key <= limit; key++) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002992 HashMap::Entry* entry = chunk_map_.LookupOrInsert(
2993 reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002994 DCHECK(entry != NULL);
2995 entry->value = page;
2996 }
2997
2998 HeapObject* object = page->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002999 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
3000
3001 if (Heap::ShouldZapGarbage()) {
3002 // Make the object consistent so the heap can be verified in OldSpaceStep.
3003 // We only need to do this in debug builds or if verify_heap is on.
3004 reinterpret_cast<Object**>(object->address())[0] =
3005 heap()->fixed_array_map();
3006 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
3007 }
3008
3009 heap()->incremental_marking()->OldSpaceStep(object_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003010 AllocationStep(object->address(), object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003011 return object;
3012}
3013
3014
3015size_t LargeObjectSpace::CommittedPhysicalMemory() {
3016 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
3017 size_t size = 0;
3018 LargePage* current = first_page_;
3019 while (current != NULL) {
3020 size += current->CommittedPhysicalMemory();
3021 current = current->next_page();
3022 }
3023 return size;
3024}
3025
3026
3027// GC support
3028Object* LargeObjectSpace::FindObject(Address a) {
3029 LargePage* page = FindPage(a);
3030 if (page != NULL) {
3031 return page->GetObject();
3032 }
3033 return Smi::FromInt(0); // Signaling not found.
3034}
3035
3036
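// Maps 'a' to the LargePage that covers it via the chunk map, or returns NULL
// if 'a' does not lie inside this space.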
3037LargePage* LargeObjectSpace::FindPage(Address a) {
3038 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
3039 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003040 static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003041 if (e != NULL) {
3042 DCHECK(e->value != NULL);
3043 LargePage* page = reinterpret_cast<LargePage*>(e->value);
Ben Murdochc5610432016-08-08 18:44:38 +01003044 DCHECK(LargePage::IsValid(page));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003045 if (page->Contains(a)) {
3046 return page;
3047 }
3048 }
3049 return NULL;
3050}
3051
3052
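// After mark-compact every live large object is black; reset the mark bits to
// white and clear the per-page progress bars and live-byte counts for the next
// marking cycle.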
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003053void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3054 LargePage* current = first_page_;
3055 while (current != NULL) {
3056 HeapObject* object = current->GetObject();
3057 MarkBit mark_bit = Marking::MarkBitFrom(object);
3058 DCHECK(Marking::IsBlack(mark_bit));
3059 Marking::BlackToWhite(mark_bit);
3060 Page::FromAddress(object->address())->ResetProgressBar();
3061 Page::FromAddress(object->address())->ResetLiveBytes();
3062 current = current->next_page();
3063 }
3064}
3065
3066
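// Sweeps the large object space: pages whose object is no longer marked black
// are unlinked from the page list, their chunk-map entries are removed, and
// the backing memory is handed back to the memory allocator.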
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003067void LargeObjectSpace::FreeUnmarkedObjects() {
3068 LargePage* previous = NULL;
3069 LargePage* current = first_page_;
3070 while (current != NULL) {
3071 HeapObject* object = current->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003072 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003073 DCHECK(!Marking::IsGrey(mark_bit));
3074 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003075 previous = current;
3076 current = current->next_page();
3077 } else {
3078 LargePage* page = current;
3079 // Cut the chunk out from the chunk list.
3080 current = current->next_page();
3081 if (previous == NULL) {
3082 first_page_ = current;
3083 } else {
3084 previous->set_next_page(current);
3085 }
3086
3087 // Free the chunk.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003088 size_ -= static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003089 AccountUncommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003090 objects_size_ -= object->Size();
3091 page_count_--;
3092
3093 // Remove entries belonging to this page.
3094 // Use a local 'alignment' variable to keep single lines within the
3095 // 80-character limit checked by tools/presubmit.py.
3096 const intptr_t alignment = MemoryChunk::kAlignment;
3097 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3098 uintptr_t limit = base + (page->size() - 1) / alignment;
3099 for (uintptr_t key = base; key <= limit; key++) {
3100 chunk_map_.Remove(reinterpret_cast<void*>(key),
3101 static_cast<uint32_t>(key));
3102 }
3103
Ben Murdochc5610432016-08-08 18:44:38 +01003104 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003105 }
3106 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003107}
3108
3109
3110bool LargeObjectSpace::Contains(HeapObject* object) {
3111 Address address = object->address();
3112 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3113
3114 bool owned = (chunk->owner() == this);
3115
3116 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3117
3118 return owned;
3119}
3120
3121
3122#ifdef VERIFY_HEAP
3123// We do not assume that the large object iterator works, because it depends
3124// on the invariants we are checking during verification.
3125void LargeObjectSpace::Verify() {
3126 for (LargePage* chunk = first_page_; chunk != NULL;
3127 chunk = chunk->next_page()) {
3128 // Each chunk contains an object that starts at the large object page's
3129 // object area start.
3130 HeapObject* object = chunk->GetObject();
3131 Page* page = Page::FromAddress(object->address());
3132 CHECK(object->address() == page->area_start());
3133
3134 // The first word should be a map, and we expect all map pointers to be
3135 // in map space.
3136 Map* map = object->map();
3137 CHECK(map->IsMap());
3138 CHECK(heap()->map_space()->Contains(map));
3139
3140 // We have only code, sequential strings, external strings
3141 // (sequential strings that have been morphed into external
3142 // strings), fixed arrays, fixed double arrays, and byte arrays in the
3143 // large object space.
Ben Murdochda12d292016-06-02 14:46:10 +01003144 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003145 object->IsExternalString() || object->IsFixedArray() ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003146 object->IsFixedDoubleArray() || object->IsByteArray());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003147
3148 // The object itself should look OK.
3149 object->ObjectVerify();
3150
3151 // Byte arrays and strings don't have interior pointers.
Ben Murdochda12d292016-06-02 14:46:10 +01003152 if (object->IsAbstractCode()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003153 VerifyPointersVisitor code_visitor;
3154 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3155 } else if (object->IsFixedArray()) {
3156 FixedArray* array = FixedArray::cast(object);
3157 for (int j = 0; j < array->length(); j++) {
3158 Object* element = array->get(j);
3159 if (element->IsHeapObject()) {
3160 HeapObject* element_object = HeapObject::cast(element);
3161 CHECK(heap()->Contains(element_object));
3162 CHECK(element_object->map()->IsMap());
3163 }
3164 }
3165 }
3166 }
3167}
3168#endif
3169
3170
3171#ifdef DEBUG
3172void LargeObjectSpace::Print() {
3173 OFStream os(stdout);
3174 LargeObjectIterator it(this);
3175 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3176 obj->Print(os);
3177 }
3178}
3179
3180
3181void LargeObjectSpace::ReportStatistics() {
Ben Murdochc5610432016-08-08 18:44:38 +01003182 PrintF(" size: %" V8PRIdPTR "\n", size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003183 int num_objects = 0;
3184 ClearHistograms(heap()->isolate());
3185 LargeObjectIterator it(this);
3186 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3187 num_objects++;
3188 CollectHistogramInfo(obj);
3189 }
3190
3191 PrintF(
3192 " number of objects %d, "
Ben Murdochc5610432016-08-08 18:44:38 +01003193 "size of objects %" V8PRIdPTR "\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003194 num_objects, objects_size_);
3195 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3196}
3197
3198
3199void LargeObjectSpace::CollectCodeStatistics() {
3200 Isolate* isolate = heap()->isolate();
3201 LargeObjectIterator obj_it(this);
3202 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Ben Murdochda12d292016-06-02 14:46:10 +01003203 if (obj->IsAbstractCode()) {
3204 AbstractCode* code = AbstractCode::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003205 isolate->code_kind_statistics()[code->kind()] += code->Size();
3206 }
3207 }
3208}
3209
3210
3211void Page::Print() {
3212 // Make a best-effort to print the objects in the page.
3213 PrintF("Page@%p in %s\n", this->address(),
3214 AllocationSpaceName(this->owner()->identity()));
3215 printf(" --------------------------------------\n");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003216 HeapObjectIterator objects(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003217 unsigned mark_size = 0;
3218 for (HeapObject* object = objects.Next(); object != NULL;
3219 object = objects.Next()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003220 bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003221 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3222 if (is_marked) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003223 mark_size += object->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003224 }
3225 object->ShortPrint();
3226 PrintF("\n");
3227 }
3228 printf(" --------------------------------------\n");
3229 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3230}
3231
3232#endif // DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003233} // namespace internal
3234} // namespace v8