// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->SweepingDone());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
      cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
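
// Usage sketch (illustrative, added commentary): a HeapObjectIterator walks
// every live object either in a whole paged space or on a single page,
// waiting for concurrent sweeping of each page to finish before touching it:
//
//   HeapObjectIterator it(heap->old_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // obj is a fully swept, iterable heap object.
//   }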

PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->ResumeAllocationObservers();
  }
}

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have a
  // 28-bit encoded immediate, the addresses have to be within a 256MB-aligned
  // region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}
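
// Note (added, illustrative): the reason for reserving one contiguous code
// range up front is spelled out in CompareFreeBlockAddress() below: any two
// addresses inside the range differ by an amount that fits in a signed 32-bit
// int, so generated code objects can reach each other with near calls.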


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}
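
// Note (added, illustrative): GetNextAllocationBlock() is a first-fit scan
// with lazy compaction. Only when no remaining block on allocation_list_ is
// large enough are the free and allocation lists merged, sorted by address,
// and runs of adjacent blocks coalesced, e.g. [0x100, +0x80) followed by
// [0x180, +0x40) becomes a single block [0x100, +0xC0).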


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes the guard pages while commit_size does not. Make
  // sure callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}


bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  for (MemoryChunk* chunk : chunk_pool_) {
    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
               NOT_EXECUTABLE);
  }
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_.Value(), 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}

void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // Flags marks this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->old_to_new_slots_ = nullptr;
  chunk->old_to_old_slots_ = nullptr;
  chunk->typed_old_to_old_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->mutex_ = nullptr;
  chunk->available_in_free_list_ = 0;
  chunk->wasted_memory_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}
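
// Note (added, illustrative): CommitArea() works in whole OS commit pages.
// For example, assuming a 4KB commit page size, growing the usable area from
// 10KB to 20KB rounds both header_size + requested and the currently
// committed span up to 4KB boundaries and commits only the difference; the
// symmetric branch uncommits trailing pages when the requested area shrinks.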


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // | Committed but not used     |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}
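
// Note (added, illustrative): AllocateChunk() deliberately separates
// reserve_area_size (address space set aside for the chunk) from
// commit_area_size (memory actually backed by the OS right now). The diagram
// above shows the resulting "committed but not used" and "reserved but not
// committed" regions; MemoryChunk::CommitArea() later grows or shrinks the
// committed part within the same reservation.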


void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
  available_in_free_list_ = 0;
}

LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}

template <MemoryAllocator::AllocationMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  if (mode == kRegular) {
    PreFreeMemory(chunk);
    PerformFreeMemory(chunk);
  } else {
    DCHECK_EQ(mode, kPooled);
    FreePooled(chunk);
  }
}

template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
    MemoryChunk* chunk);

template <typename PageType, MemoryAllocator::AllocationMode mode,
          typename SpaceType>
PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                        Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (mode == kPooled) {
    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
}

template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
                                             PagedSpace>(intptr_t, PagedSpace*,
                                                         Executability);

template NewSpacePage* MemoryAllocator::AllocatePage<
    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
                                                       Executability);

template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  if (chunk_pool_.is_empty()) return nullptr;
  const int size = MemoryChunk::kPageSize;
  MemoryChunk* chunk = chunk_pool_.RemoveLast();
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start = start + MemoryChunk::kObjectStartOffset;
  const Address area_end = start + size;
  CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE);
  base::VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
  return chunk;
}

void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
  chunk_pool_.Add(chunk);
  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
  if (chunk->executable() == EXECUTABLE) {
    size_executable_.Increment(-chunk_size);
  }
  size_.Increment(-chunk_size);
  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
}
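
// Note (added, illustrative): the kPooled mode recycles new-space pages.
// FreePooled() pushes the chunk onto chunk_pool_ and merely uncommits its
// memory, so a later AllocatePagePooled() can recommit the same region and
// reinitialize the MemoryChunk header without asking the OS for new address
// space.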

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}
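
// Note (added, illustrative): taken together these helpers describe the
// executable-page layout drawn in AllocateChunk(). Assuming a 4KB OS commit
// page and a header that fits in one commit page, the header occupies the
// first commit page, the following commit page is a non-writable guard, the
// usable code area starts right after the guard, and the last commit page of
// the chunk is again a guard, so the area ends at kPageSize minus one commit
// page.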


bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemory() {
  delete skip_list_;
  skip_list_ = nullptr;
  delete mutex_;
  mutex_ = nullptr;
  ReleaseOldToNewSlots();
  ReleaseOldToOldSlots();
}

static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK(pages > 0);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

void MemoryChunk::AllocateOldToNewSlots() {
  DCHECK(nullptr == old_to_new_slots_);
  old_to_new_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToNewSlots() {
  delete[] old_to_new_slots_;
  old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateOldToOldSlots() {
  DCHECK(nullptr == old_to_old_slots_);
  old_to_old_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToOldSlots() {
  delete[] old_to_old_slots_;
  old_to_old_slots_ = nullptr;
}

void MemoryChunk::AllocateTypedOldToOldSlots() {
  DCHECK(nullptr == typed_old_to_old_slots_);
  typed_old_to_old_slots_ = new TypedSlotSet(address());
}

void MemoryChunk::ReleaseTypedOldToOldSlots() {
  delete typed_old_to_old_slots_;
  typed_old_to_old_slots_ = nullptr;
}
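
// Note (added, illustrative): AllocateSlotSet() covers the whole chunk with
// one SlotSet per page-sized region, i.e. a chunk of size_ bytes gets
// ceil(size_ / Page::kPageSize) buckets, each told the start address of its
// region via SetPageStart(). Large-object chunks therefore get more than one
// bucket.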

// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);

void Space::AllocationStep(Address soon_object, int size) {
  if (!allocation_observers_paused_) {
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
    }
  }
}

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable), free_list_(this) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  List<Page*>* swept_pages = collector->swept_pages(identity());
  intptr_t added = 0;
  {
    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
    for (int i = swept_pages->length() - 1; i >= 0; --i) {
      Page* p = (*swept_pages)[i];
      // Only during compaction pages can actually change ownership. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local() && (p->owner() != this)) {
        if (added > kCompactionMemoryWanted) break;
        base::LockGuard<base::Mutex> guard(
            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
        p->Unlink();
        p->set_owner(this);
        p->InsertAfter(anchor_.prev_page());
      }
      added += RelinkFreeListCategories(p);
      added += p->wasted_memory();
      swept_pages->Remove(i);
    }
  }
  accounting_stats_.IncreaseCapacity(added);
}

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  //   anchor_

  other->EmptyAllocationInfo();

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();

    // Relinking requires the category to be unlinked.
    other->UnlinkFreeListCategories(p);

    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
    RelinkFreeListCategories(p);
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand(size_t size) {
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());

  // Are we going to exceed capacity for this space? At this point we can be
  // way over the maximum size because of AlwaysAllocate scopes and large
  // objects.
  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;

  return true;
}


bool PagedSpace::Expand() {
  intptr_t size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!CanExpand(size)) return false;

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
      size, this, executable());
  if (p == NULL) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  // When incremental marking was activated, old space pages are allocated
  // black.
  if (heap()->incremental_marking()->black_allocation() &&
      identity() == OLD_SPACE) {
    Bitmap::SetAllBits(p);
    p->SetFlag(Page::BLACK_PAGE);
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
    }
  }

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}
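
// Note (added, illustrative): Expand() adds one fresh page to the space. The
// first page of a snapshotable space is sized from the snapshot rather than
// AreaSize(), the new page is accounted via AccountCommitted(), and under
// black allocation the page's mark bitmap is pre-filled so objects allocated
// into it are treated as live by the marking cycle that is in progress.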


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(AreaSize(), page->area_size());
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->QueueMemoryChunkForFree(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
          Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation

bool NewSpace::SetUp(int initial_semispace_capacity,
                     int maximum_semispace_capacity) {
  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetAllocationInfo();

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  allocation_info_.Reset(nullptr, nullptr);

  to_space_.TearDown();
  from_space_.TearDown();
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1404
1405
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001406void LocalAllocationBuffer::Close() {
1407 if (IsValid()) {
1408 heap_->CreateFillerObjectAt(
1409 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001410 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1411 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001412 }
1413}
1414
1415
1416LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1417 AllocationInfo allocation_info)
1418 : heap_(heap), allocation_info_(allocation_info) {
1419 if (IsValid()) {
1420 heap_->CreateFillerObjectAt(
1421 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001422 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1423 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001424 }
1425}
1426
1427
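// LocalAllocationBuffer is meant to be used by value: assigning over an
// existing buffer closes it first, and copying takes over the source's
// allocation info and invalidates the source. A rough usage sketch (assuming
// the FromResult() and AllocateRawAligned() helpers that spaces.h declares
// for this class; kLabSize, result and size are placeholders):
//
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
//   if (lab.IsValid()) {
//     AllocationResult object = lab.AllocateRawAligned(size, kWordAligned);
//     ...
//   }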
1428LocalAllocationBuffer::LocalAllocationBuffer(
1429 const LocalAllocationBuffer& other) {
1430 *this = other;
1431}
1432
1433
1434LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1435 const LocalAllocationBuffer& other) {
1436 Close();
1437 heap_ = other.heap_;
1438 allocation_info_ = other.allocation_info_;
1439
 1440 // This is needed since we (a) cannot yet use move semantics, (b) want to
 1441 // make the class easy to use as a value type, and (c) implicitly call
 1442 // {Close} upon copy.
1443 const_cast<LocalAllocationBuffer&>(other)
1444 .allocation_info_.Reset(nullptr, nullptr);
1445 return *this;
1446}
1447
1448
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001449void NewSpace::UpdateAllocationInfo() {
1450 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001451 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001452 UpdateInlineAllocationLimit(0);
1453 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1454}
1455
1456
1457void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001458 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001459 to_space_.Reset();
1460 UpdateAllocationInfo();
1461 pages_used_ = 0;
1462 // Clear all mark-bits in the to-space.
1463 NewSpacePageIterator it(&to_space_);
1464 while (it.has_next()) {
1465 Bitmap::Clear(it.next());
1466 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001467 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001468}
1469
1470
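// Recomputes the inline allocation limit for the current to-space page: with
// inline allocation disabled the limit is placed right after the requested
// size, with observers paused (or none registered) it is the page end, and
// otherwise it is lowered so that the observers get a step after
// GetNextInlineAllocationStepSize() bytes.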
1471void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1472 if (heap()->inline_allocation_disabled()) {
1473 // Lowest limit when linear allocation was disabled.
1474 Address high = to_space_.page_high();
1475 Address new_top = allocation_info_.top() + size_in_bytes;
1476 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001477 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001478 // Normal limit is the end of the current page.
1479 allocation_info_.set_limit(to_space_.page_high());
1480 } else {
1481 // Lower limit during incremental marking.
1482 Address high = to_space_.page_high();
1483 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001484 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001485 allocation_info_.set_limit(Min(new_limit, high));
1486 }
1487 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1488}
1489
1490
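// Advances allocation to the next to-space page. The tail of the current page
// is filled with a filler object so the space stays iterable, and during a
// scavenge the promotion queue is told about the new limit. Returns false if
// to-space has no page left to advance to.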
1491bool NewSpace::AddFreshPage() {
1492 Address top = allocation_info_.top();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001493 DCHECK(!NewSpacePage::IsAtStart(top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001494 if (!to_space_.AdvancePage()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001495 // No more pages left to advance.
1496 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001497 }
1498
1499 // Clear remainder of current page.
1500 Address limit = NewSpacePage::FromLimit(top)->area_end();
1501 if (heap()->gc_state() == Heap::SCAVENGE) {
1502 heap()->promotion_queue()->SetNewLimit(limit);
1503 }
1504
1505 int remaining_in_page = static_cast<int>(limit - top);
Ben Murdochda12d292016-06-02 14:46:10 +01001506 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001507 pages_used_++;
1508 UpdateAllocationInfo();
1509
1510 return true;
1511}
1512
1513
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001514bool NewSpace::AddFreshPageSynchronized() {
1515 base::LockGuard<base::Mutex> guard(&mutex_);
1516 return AddFreshPage();
1517}
1518
1519
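// Ensures the current allocation area can hold size_in_bytes plus any
// alignment filler, adding a fresh page if necessary. If the limit had been
// lowered below the page end, an allocation step is performed for the object
// about to be allocated and the limit is recomputed.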
1520bool NewSpace::EnsureAllocation(int size_in_bytes,
1521 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001522 Address old_top = allocation_info_.top();
1523 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001524 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1525 int aligned_size_in_bytes = size_in_bytes + filler_size;
1526
1527 if (old_top + aligned_size_in_bytes >= high) {
1528 // Not enough room in the page, try to allocate a new one.
1529 if (!AddFreshPage()) {
1530 return false;
1531 }
1532
1533 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1534
1535 old_top = allocation_info_.top();
1536 high = to_space_.page_high();
1537 filler_size = Heap::GetFillToAlign(old_top, alignment);
1538 aligned_size_in_bytes = size_in_bytes + filler_size;
1539 }
1540
1541 DCHECK(old_top + aligned_size_in_bytes < high);
1542
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001543 if (allocation_info_.limit() < high) {
1544 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001545 // or because incremental marking wants to get a chance to do a step,
 1546 // or because the idle scavenge job wants to get a chance to post a task.
1547 // Set the new limit accordingly.
1548 Address new_top = old_top + aligned_size_in_bytes;
1549 Address soon_object = old_top + filler_size;
1550 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1551 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1552 }
1553 return true;
1554}
1555
1556
1557void NewSpace::StartNextInlineAllocationStep() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001558 if (!allocation_observers_paused_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001559 top_on_previous_step_ =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001560 allocation_observers_->length() ? allocation_info_.top() : 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001561 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001562 }
1563}
1564
1565
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001566intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1567 intptr_t next_step = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001568 for (int i = 0; i < allocation_observers_->length(); ++i) {
1569 AllocationObserver* o = (*allocation_observers_)[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001570 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1571 : o->bytes_to_next_step();
1572 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001573 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001574 return next_step;
1575}
1576
Ben Murdoch097c5b22016-05-18 11:27:45 +01001577void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1578 Space::AddAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001579 StartNextInlineAllocationStep();
1580}
1581
Ben Murdoch097c5b22016-05-18 11:27:45 +01001582void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1583 Space::RemoveAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001584 StartNextInlineAllocationStep();
1585}
1586
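// Pausing performs one last step to account for memory allocated since the
// previous step, then disables stepping and widens the allocation limit back
// to the page end.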
Ben Murdoch097c5b22016-05-18 11:27:45 +01001587void NewSpace::PauseAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001588 // Do a step to account for memory allocated so far.
1589 InlineAllocationStep(top(), top(), nullptr, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001590 Space::PauseAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001591 top_on_previous_step_ = 0;
1592 UpdateInlineAllocationLimit(0);
1593}
1594
Ben Murdoch097c5b22016-05-18 11:27:45 +01001595void NewSpace::ResumeAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001596 DCHECK(top_on_previous_step_ == 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001597 Space::ResumeAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001598 StartNextInlineAllocationStep();
1599}
1600
1601
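// Notifies every allocation observer of the bytes allocated since the last
// step and records new_top as the start of the next step. When
// top_on_previous_step_ is zero, stepping is inactive and this is a no-op.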
1602void NewSpace::InlineAllocationStep(Address top, Address new_top,
1603 Address soon_object, size_t size) {
1604 if (top_on_previous_step_) {
1605 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001606 for (int i = 0; i < allocation_observers_->length(); ++i) {
1607 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1608 size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001609 }
1610 top_on_previous_step_ = new_top;
1611 }
1612}
1613
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001614#ifdef VERIFY_HEAP
1615// We do not use the SemiSpaceIterator because verification doesn't assume
1616// that it works (it depends on the invariants we are checking).
1617void NewSpace::Verify() {
1618 // The allocation pointer should be in the space or at the very end.
1619 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1620
1621 // There should be objects packed in from the low address up to the
1622 // allocation pointer.
1623 Address current = to_space_.first_page()->area_start();
1624 CHECK_EQ(current, to_space_.space_start());
1625
1626 while (current != top()) {
1627 if (!NewSpacePage::IsAtEnd(current)) {
1628 // The allocation pointer should not be in the middle of an object.
1629 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1630 current < top());
1631
1632 HeapObject* object = HeapObject::FromAddress(current);
1633
1634 // The first word should be a map, and we expect all map pointers to
1635 // be in map space.
1636 Map* map = object->map();
1637 CHECK(map->IsMap());
1638 CHECK(heap()->map_space()->Contains(map));
1639
1640 // The object should not be code or a map.
1641 CHECK(!object->IsMap());
Ben Murdochda12d292016-06-02 14:46:10 +01001642 CHECK(!object->IsAbstractCode());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001643
1644 // The object itself should look OK.
1645 object->ObjectVerify();
1646
1647 // All the interior pointers should be contained in the heap.
1648 VerifyPointersVisitor visitor;
1649 int size = object->Size();
1650 object->IterateBody(map->instance_type(), size, &visitor);
1651
1652 current += size;
1653 } else {
1654 // At end of page, switch to next page.
1655 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1656 // Next page should be valid.
1657 CHECK(!page->is_anchor());
1658 current = page->area_start();
1659 }
1660 }
1661
1662 // Check semi-spaces.
1663 CHECK_EQ(from_space_.id(), kFromSpace);
1664 CHECK_EQ(to_space_.id(), kToSpace);
1665 from_space_.Verify();
1666 to_space_.Verify();
1667}
1668#endif
1669
1670// -----------------------------------------------------------------------------
1671// SemiSpace implementation
1672
Ben Murdochda12d292016-06-02 14:46:10 +01001673void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001674 DCHECK_GE(maximum_capacity, Page::kPageSize);
1675 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1676 current_capacity_ = minimum_capacity_;
1677 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001678 committed_ = false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001679}
1680
1681
1682void SemiSpace::TearDown() {
Ben Murdochda12d292016-06-02 14:46:10 +01001683 // Properly uncommit memory to keep the allocator counters in sync.
1684 if (is_committed()) Uncommit();
1685 current_capacity_ = maximum_capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001686}
1687
1688
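// Commits the semispace by linking current_capacity_ / Page::kPageSize pooled
// pages behind the anchor; partially added pages are unlinked again if an
// allocation fails. The age mark is initialized to the start of the first
// page on the first commit.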
1689bool SemiSpace::Commit() {
1690 DCHECK(!is_committed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001691 NewSpacePage* current = anchor();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001692 const int num_pages = current_capacity_ / Page::kPageSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001693 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001694 NewSpacePage* new_page =
Ben Murdochda12d292016-06-02 14:46:10 +01001695 heap()
1696 ->isolate()
1697 ->memory_allocator()
1698 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1699 NewSpacePage::kAllocatableMemory, this, executable());
1700 if (new_page == nullptr) {
1701 RewindPages(current, pages_added);
1702 return false;
1703 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001704 new_page->InsertAfter(current);
1705 current = new_page;
1706 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001707 Reset();
Ben Murdochda12d292016-06-02 14:46:10 +01001708 AccountCommitted(current_capacity_);
1709 if (age_mark_ == nullptr) {
1710 age_mark_ = first_page()->area_start();
1711 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001712 committed_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001713 return true;
1714}
1715
1716
1717bool SemiSpace::Uncommit() {
1718 DCHECK(is_committed());
Ben Murdochda12d292016-06-02 14:46:10 +01001719 NewSpacePageIterator it(this);
1720 while (it.has_next()) {
1721 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
1722 it.next());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001723 }
1724 anchor()->set_next_page(anchor());
1725 anchor()->set_prev_page(anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001726 AccountUncommitted(current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001727 committed_ = false;
1728 return true;
1729}
1730
1731
1732size_t SemiSpace::CommittedPhysicalMemory() {
1733 if (!is_committed()) return 0;
1734 size_t size = 0;
1735 NewSpacePageIterator it(this);
1736 while (it.has_next()) {
1737 size += it.next()->CommittedPhysicalMemory();
1738 }
1739 return size;
1740}
1741
1742
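// Grows the committed capacity to new_capacity (a page-size multiple not
// exceeding maximum_capacity_) by appending pooled pages after the current
// last page. New pages inherit the flip-relevant flags of the last page;
// partially added pages are unlinked again on failure.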
1743bool SemiSpace::GrowTo(int new_capacity) {
1744 if (!is_committed()) {
1745 if (!Commit()) return false;
1746 }
Ben Murdochda12d292016-06-02 14:46:10 +01001747 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001748 DCHECK_LE(new_capacity, maximum_capacity_);
1749 DCHECK_GT(new_capacity, current_capacity_);
Ben Murdochda12d292016-06-02 14:46:10 +01001750 const int delta = new_capacity - current_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001751 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochda12d292016-06-02 14:46:10 +01001752 int delta_pages = delta / NewSpacePage::kPageSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001753 NewSpacePage* last_page = anchor()->prev_page();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001754 DCHECK_NE(last_page, anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001755 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001756 NewSpacePage* new_page =
Ben Murdochda12d292016-06-02 14:46:10 +01001757 heap()
1758 ->isolate()
1759 ->memory_allocator()
1760 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1761 NewSpacePage::kAllocatableMemory, this, executable());
1762 if (new_page == nullptr) {
1763 RewindPages(last_page, pages_added);
1764 return false;
1765 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001766 new_page->InsertAfter(last_page);
1767 Bitmap::Clear(new_page);
 1768 // Duplicate the flags that were set on the old page.
1769 new_page->SetFlags(last_page->GetFlags(),
1770 NewSpacePage::kCopyOnFlipFlagsMask);
1771 last_page = new_page;
1772 }
Ben Murdochda12d292016-06-02 14:46:10 +01001773 AccountCommitted(static_cast<intptr_t>(delta));
1774 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001775 return true;
1776}
1777
Ben Murdochda12d292016-06-02 14:46:10 +01001778void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
1779 NewSpacePage* new_last_page = nullptr;
1780 NewSpacePage* last_page = start;
1781 while (num_pages > 0) {
1782 DCHECK_NE(last_page, anchor());
1783 new_last_page = last_page->prev_page();
1784 last_page->prev_page()->set_next_page(last_page->next_page());
1785 last_page->next_page()->set_prev_page(last_page->prev_page());
1786 last_page = new_last_page;
1787 num_pages--;
1788 }
1789}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001790
1791bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdochda12d292016-06-02 14:46:10 +01001792 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001793 DCHECK_GE(new_capacity, minimum_capacity_);
1794 DCHECK_LT(new_capacity, current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001795 if (is_committed()) {
Ben Murdochda12d292016-06-02 14:46:10 +01001796 const int delta = current_capacity_ - new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001797 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochda12d292016-06-02 14:46:10 +01001798 int delta_pages = delta / NewSpacePage::kPageSize;
1799 NewSpacePage* new_last_page;
1800 NewSpacePage* last_page;
1801 while (delta_pages > 0) {
1802 last_page = anchor()->prev_page();
1803 new_last_page = last_page->prev_page();
1804 new_last_page->set_next_page(anchor());
1805 anchor()->set_prev_page(new_last_page);
1806 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
1807 last_page);
1808 delta_pages--;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001809 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001810 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001811 }
Ben Murdochda12d292016-06-02 14:46:10 +01001812 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001813 return true;
1814}
1815
Ben Murdoch097c5b22016-05-18 11:27:45 +01001816void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001817 anchor_.set_owner(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001818 // Fix up back-pointers to the anchor; its address changes when we swap.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001819 anchor_.prev_page()->set_next_page(&anchor_);
1820 anchor_.next_page()->set_prev_page(&anchor_);
1821
Ben Murdoch097c5b22016-05-18 11:27:45 +01001822 NewSpacePageIterator it(this);
1823 while (it.has_next()) {
1824 NewSpacePage* page = it.next();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001825 page->set_owner(this);
1826 page->SetFlags(flags, mask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001827 if (id_ == kToSpace) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001828 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1829 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1830 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1831 page->ResetLiveBytes();
1832 } else {
1833 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1834 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1835 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001836 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1837 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001838 }
1839}
1840
1841
1842void SemiSpace::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001843 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001844 current_page_ = anchor_.next_page();
1845}
1846
1847
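// Swaps everything but the ids of the two semispaces, then fixes up the page
// owners and flags on both sides; the new to-space pages re-acquire the flip
// flags that the previous to-space page carried.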
1848void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1849 // We won't be swapping semispaces without data in them.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001850 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1851 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001852
Ben Murdoch097c5b22016-05-18 11:27:45 +01001853 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001854
Ben Murdoch097c5b22016-05-18 11:27:45 +01001855 // We swap all properties but id_.
1856 std::swap(from->current_capacity_, to->current_capacity_);
1857 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1858 std::swap(from->minimum_capacity_, to->minimum_capacity_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001859 std::swap(from->age_mark_, to->age_mark_);
1860 std::swap(from->committed_, to->committed_);
1861 std::swap(from->anchor_, to->anchor_);
1862 std::swap(from->current_page_, to->current_page_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001863
Ben Murdoch097c5b22016-05-18 11:27:45 +01001864 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
1865 from->FixPagesFlags(0, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001866}
1867
1868
1869void SemiSpace::set_age_mark(Address mark) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001870 DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001871 age_mark_ = mark;
1872 // Mark all pages up to the one containing mark.
1873 NewSpacePageIterator it(space_start(), mark);
1874 while (it.has_next()) {
1875 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1876 }
1877}
1878
1879
1880#ifdef DEBUG
1881void SemiSpace::Print() {}
1882#endif
1883
1884#ifdef VERIFY_HEAP
1885void SemiSpace::Verify() {
1886 bool is_from_space = (id_ == kFromSpace);
1887 NewSpacePage* page = anchor_.next_page();
1888 CHECK(anchor_.semi_space() == this);
1889 while (page != &anchor_) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001890 CHECK_EQ(page->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001891 CHECK(page->InNewSpace());
1892 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1893 : MemoryChunk::IN_TO_SPACE));
1894 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1895 : MemoryChunk::IN_FROM_SPACE));
1896 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1897 if (!is_from_space) {
1898 // The pointers-from-here-are-interesting flag isn't updated dynamically
1899 // on from-space pages, so it might be out of sync with the marking state.
1900 if (page->heap()->incremental_marking()->IsMarking()) {
1901 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1902 } else {
1903 CHECK(
1904 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1905 }
1906 // TODO(gc): Check that the live_bytes_count_ field matches the
1907 // black marking on the page (if we make it match in new-space).
1908 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001909 CHECK_EQ(page->prev_page()->next_page(), page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001910 page = page->next_page();
1911 }
1912}
1913#endif
1914
1915#ifdef DEBUG
1916void SemiSpace::AssertValidRange(Address start, Address end) {
 1917 // Both addresses must belong to the same semi-space.
1918 NewSpacePage* page = NewSpacePage::FromLimit(start);
1919 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1920 SemiSpace* space = page->semi_space();
1921 CHECK_EQ(space, end_page->semi_space());
 1922 // The start address must be before the end address, either on the same
 1923 // page, or the end address must be on a later page in the linked list
 1924 // of semi-space pages.
1925 if (page == end_page) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001926 CHECK_LE(start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001927 } else {
1928 while (page != end_page) {
1929 page = page->next_page();
1930 CHECK_NE(page, space->anchor());
1931 }
1932 }
1933}
1934#endif
1935
1936
1937// -----------------------------------------------------------------------------
1938// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001939
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001940SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001941 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001942}
1943
1944
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001945void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001946 SemiSpace::AssertValidRange(start, end);
1947 current_ = start;
1948 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001949}
1950
1951
1952#ifdef DEBUG
1953// heap_histograms is shared, always clear it before using it.
1954static void ClearHistograms(Isolate* isolate) {
1955// We reset the name each time, though it hasn't changed.
1956#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1957 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1958#undef DEF_TYPE_NAME
1959
1960#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1961 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1962#undef CLEAR_HISTOGRAM
1963
1964 isolate->js_spill_information()->Clear();
1965}
1966
1967
1968static void ClearCodeKindStatistics(int* code_kind_statistics) {
1969 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1970 code_kind_statistics[i] = 0;
1971 }
1972}
1973
1974
1975static void ReportCodeKindStatistics(int* code_kind_statistics) {
1976 PrintF("\n Code kind histograms: \n");
1977 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1978 if (code_kind_statistics[i] > 0) {
1979 PrintF(" %-20s: %10d bytes\n",
1980 Code::Kind2String(static_cast<Code::Kind>(i)),
1981 code_kind_statistics[i]);
1982 }
1983 }
1984 PrintF("\n");
1985}
1986
1987
1988static int CollectHistogramInfo(HeapObject* obj) {
1989 Isolate* isolate = obj->GetIsolate();
1990 InstanceType type = obj->map()->instance_type();
1991 DCHECK(0 <= type && type <= LAST_TYPE);
1992 DCHECK(isolate->heap_histograms()[type].name() != NULL);
1993 isolate->heap_histograms()[type].increment_number(1);
1994 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1995
1996 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1997 JSObject::cast(obj)
1998 ->IncrementSpillStatistics(isolate->js_spill_information());
1999 }
2000
2001 return obj->Size();
2002}
2003
2004
2005static void ReportHistogram(Isolate* isolate, bool print_spill) {
2006 PrintF("\n Object Histogram:\n");
2007 for (int i = 0; i <= LAST_TYPE; i++) {
2008 if (isolate->heap_histograms()[i].number() > 0) {
2009 PrintF(" %-34s%10d (%10d bytes)\n",
2010 isolate->heap_histograms()[i].name(),
2011 isolate->heap_histograms()[i].number(),
2012 isolate->heap_histograms()[i].bytes());
2013 }
2014 }
2015 PrintF("\n");
2016
2017 // Summarize string types.
2018 int string_number = 0;
2019 int string_bytes = 0;
2020#define INCREMENT(type, size, name, camel_name) \
2021 string_number += isolate->heap_histograms()[type].number(); \
2022 string_bytes += isolate->heap_histograms()[type].bytes();
2023 STRING_TYPE_LIST(INCREMENT)
2024#undef INCREMENT
2025 if (string_number > 0) {
2026 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2027 string_bytes);
2028 }
2029
2030 if (FLAG_collect_heap_spill_statistics && print_spill) {
2031 isolate->js_spill_information()->Print();
2032 }
2033}
2034#endif // DEBUG
2035
2036
2037// Support for statistics gathering for --heap-stats and --log-gc.
2038void NewSpace::ClearHistograms() {
2039 for (int i = 0; i <= LAST_TYPE; i++) {
2040 allocated_histogram_[i].clear();
2041 promoted_histogram_[i].clear();
2042 }
2043}
2044
2045
2046// Because the copying collector does not touch garbage objects, we iterate
2047// the new space before a collection to get a histogram of allocated objects.
 2048// This only happens when the --log-gc flag is set.
2049void NewSpace::CollectStatistics() {
2050 ClearHistograms();
2051 SemiSpaceIterator it(this);
2052 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2053 RecordAllocation(obj);
2054}
2055
2056
2057static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2058 const char* description) {
2059 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2060 // Lump all the string types together.
2061 int string_number = 0;
2062 int string_bytes = 0;
2063#define INCREMENT(type, size, name, camel_name) \
2064 string_number += info[type].number(); \
2065 string_bytes += info[type].bytes();
2066 STRING_TYPE_LIST(INCREMENT)
2067#undef INCREMENT
2068 if (string_number > 0) {
2069 LOG(isolate,
2070 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2071 }
2072
2073 // Then do the other types.
2074 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2075 if (info[i].number() > 0) {
2076 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2077 info[i].bytes()));
2078 }
2079 }
2080 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2081}
2082
2083
2084void NewSpace::ReportStatistics() {
2085#ifdef DEBUG
2086 if (FLAG_heap_stats) {
2087 float pct = static_cast<float>(Available()) / TotalCapacity();
2088 PrintF(" capacity: %" V8_PTR_PREFIX
2089 "d"
2090 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2091 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2092 PrintF("\n Object Histogram:\n");
2093 for (int i = 0; i <= LAST_TYPE; i++) {
2094 if (allocated_histogram_[i].number() > 0) {
2095 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2096 allocated_histogram_[i].number(),
2097 allocated_histogram_[i].bytes());
2098 }
2099 }
2100 PrintF("\n");
2101 }
2102#endif // DEBUG
2103
2104 if (FLAG_log_gc) {
2105 Isolate* isolate = heap()->isolate();
2106 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2107 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2108 }
2109}
2110
2111
2112void NewSpace::RecordAllocation(HeapObject* obj) {
2113 InstanceType type = obj->map()->instance_type();
2114 DCHECK(0 <= type && type <= LAST_TYPE);
2115 allocated_histogram_[type].increment_number(1);
2116 allocated_histogram_[type].increment_bytes(obj->Size());
2117}
2118
2119
2120void NewSpace::RecordPromotion(HeapObject* obj) {
2121 InstanceType type = obj->map()->instance_type();
2122 DCHECK(0 <= type && type <= LAST_TYPE);
2123 promoted_histogram_[type].increment_number(1);
2124 promoted_histogram_[type].increment_bytes(obj->Size());
2125}
2126
2127
2128size_t NewSpace::CommittedPhysicalMemory() {
2129 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2130 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2131 size_t size = to_space_.CommittedPhysicalMemory();
2132 if (from_space_.is_committed()) {
2133 size += from_space_.CommittedPhysicalMemory();
2134 }
2135 return size;
2136}
2137
2138
2139// -----------------------------------------------------------------------------
 2140// Free list implementation for the old object spaces
2141
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002142
2143void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002144 set_top(nullptr);
Ben Murdochda12d292016-06-02 14:46:10 +01002145 set_prev(nullptr);
2146 set_next(nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002147 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002148}
2149
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002150FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002151 DCHECK(page()->CanAllocate());
2152
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002153 FreeSpace* node = top();
2154 if (node == nullptr) return nullptr;
Ben Murdochda12d292016-06-02 14:46:10 +01002155 set_top(node->next());
2156 *node_size = node->Size();
2157 available_ -= *node_size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002158 return node;
2159}
2160
Ben Murdochda12d292016-06-02 14:46:10 +01002161FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
2162 int* node_size) {
2163 DCHECK(page()->CanAllocate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002164
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002165 FreeSpace* node = PickNodeFromList(node_size);
Ben Murdochda12d292016-06-02 14:46:10 +01002166 if ((node != nullptr) && (*node_size < minimum_size)) {
2167 Free(node, *node_size, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002168 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002169 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002170 }
2171 return node;
2172}
2173
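// Linearly scans the category for the first node of at least minimum_size,
// unlinks it, and adjusts available_. Returns nullptr if no node is large
// enough.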
Ben Murdochda12d292016-06-02 14:46:10 +01002174FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002175 int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002176 DCHECK(page()->CanAllocate());
2177
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002178 FreeSpace* prev_non_evac_node = nullptr;
2179 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2180 cur_node = cur_node->next()) {
2181 int size = cur_node->size();
Ben Murdochda12d292016-06-02 14:46:10 +01002182 if (size >= minimum_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002183 available_ -= size;
2184 if (cur_node == top()) {
2185 set_top(cur_node->next());
2186 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002187 if (prev_non_evac_node != nullptr) {
2188 prev_non_evac_node->set_next(cur_node->next());
2189 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002190 *node_size = size;
2191 return cur_node;
2192 }
2193
2194 prev_non_evac_node = cur_node;
2195 }
2196 return nullptr;
2197}
2198
Ben Murdochda12d292016-06-02 14:46:10 +01002199bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
2200 FreeMode mode) {
2201 if (!page()->CanAllocate()) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002202
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002203 free_space->set_next(top());
2204 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002205 available_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01002206 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2207 owner()->AddCategory(this);
2208 }
2209 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002210}
2211
2212
2213void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002214 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002215 while (n != NULL) {
2216 Map** map_location = reinterpret_cast<Map**>(n->address());
2217 if (*map_location == NULL) {
2218 *map_location = heap->free_space_map();
2219 } else {
2220 DCHECK(*map_location == heap->free_space_map());
2221 }
2222 n = n->next();
2223 }
2224}
2225
Ben Murdochda12d292016-06-02 14:46:10 +01002226void FreeListCategory::Relink() {
2227 DCHECK(!is_linked());
2228 owner()->AddCategory(this);
2229}
2230
2231void FreeListCategory::Invalidate() {
2232 page()->add_available_in_free_list(-available());
2233 Reset();
2234 type_ = kInvalidCategory;
2235}
2236
Ben Murdoch097c5b22016-05-18 11:27:45 +01002237FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2238 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002239 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002240 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002241 Reset();
2242}
2243
2244
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002245void FreeList::Reset() {
Ben Murdochda12d292016-06-02 14:46:10 +01002246 ForAllFreeListCategories(
2247 [](FreeListCategory* category) { category->Reset(); });
Ben Murdoch097c5b22016-05-18 11:27:45 +01002248 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002249 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002250 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002251 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002252}
2253
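// Returns the block [start, start + size_in_bytes) to the free list. The
// block is first turned into a filler object; blocks smaller than
// kMinBlockSize cannot hold a free-list node and are accounted as wasted
// bytes (the return value), while larger blocks are added to the page-local
// category of the matching size class.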
Ben Murdochda12d292016-06-02 14:46:10 +01002254int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002255 if (size_in_bytes == 0) return 0;
2256
Ben Murdochda12d292016-06-02 14:46:10 +01002257 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
2258 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002259
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002260 Page* page = Page::FromAddress(start);
2261
Ben Murdochda12d292016-06-02 14:46:10 +01002262 // Blocks have to be a minimum size to hold free list items.
2263 if (size_in_bytes < kMinBlockSize) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002264 page->add_wasted_memory(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002265 wasted_bytes_.Increment(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002266 return size_in_bytes;
2267 }
2268
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002269 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002270 // Insert other blocks at the head of a free list of the appropriate
2271 // magnitude.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002272 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002273 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2274 page->add_available_in_free_list(size_in_bytes);
2275 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002276 return 0;
2277}
2278
Ben Murdochda12d292016-06-02 14:46:10 +01002279FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
2280 FreeListCategoryIterator it(this, type);
2281 FreeSpace* node = nullptr;
2282 while (it.HasNext()) {
2283 FreeListCategory* current = it.Next();
2284 node = current->PickNodeFromList(node_size);
2285 if (node != nullptr) {
2286 Page::FromAddress(node->address())
2287 ->add_available_in_free_list(-(*node_size));
2288 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2289 return node;
2290 }
2291 RemoveCategory(current);
2292 }
2293 return node;
2294}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002295
Ben Murdochda12d292016-06-02 14:46:10 +01002296FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
2297 int minimum_size) {
2298 if (categories_[type] == nullptr) return nullptr;
2299 FreeSpace* node =
2300 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002301 if (node != nullptr) {
2302 Page::FromAddress(node->address())
Ben Murdoch097c5b22016-05-18 11:27:45 +01002303 ->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002304 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2305 }
2306 return node;
2307}
2308
Ben Murdochda12d292016-06-02 14:46:10 +01002309FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2310 int* node_size, int minimum_size) {
2311 FreeListCategoryIterator it(this, type);
2312 FreeSpace* node = nullptr;
2313 while (it.HasNext()) {
2314 FreeListCategory* current = it.Next();
2315 node = current->SearchForNodeInList(minimum_size, node_size);
2316 if (node != nullptr) {
2317 Page::FromAddress(node->address())
2318 ->add_available_in_free_list(-(*node_size));
2319 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2320 return node;
2321 }
2322 }
2323 return node;
2324}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002325
2326FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2327 FreeSpace* node = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002328
Ben Murdoch097c5b22016-05-18 11:27:45 +01002329 // First try the allocation fast path: try to allocate the minimum element
2330 // size of a free list category. This operation is constant time.
2331 FreeListCategoryType type =
2332 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2333 for (int i = type; i < kHuge; i++) {
2334 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002335 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336 }
2337
Ben Murdoch097c5b22016-05-18 11:27:45 +01002338 // Next search the huge list for free list nodes. This takes linear time in
2339 // the number of huge elements.
Ben Murdochda12d292016-06-02 14:46:10 +01002340 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002341 if (node != nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002342 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002343 return node;
2344 }
2345
Ben Murdoch097c5b22016-05-18 11:27:45 +01002346 // We need a huge block of memory, but we didn't find anything in the huge
2347 // list.
2348 if (type == kHuge) return nullptr;
2349
2350 // Now search the best fitting free list for a node that has at least the
Ben Murdochda12d292016-06-02 14:46:10 +01002351 // requested size.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002352 type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002353 node = TryFindNodeIn(type, node_size, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002354
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002355 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2356 return node;
2357}
2358
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002359// Allocation on the old space free list. If it succeeds then a new linear
2360// allocation space has been set up with the top and limit of the space. If
2361// the allocation fails then NULL is returned, and the caller can perform a GC
2362// or allocate a new page before retrying.
2363HeapObject* FreeList::Allocate(int size_in_bytes) {
2364 DCHECK(0 < size_in_bytes);
2365 DCHECK(size_in_bytes <= kMaxBlockSize);
2366 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2367 // Don't free list allocate if there is linear space available.
2368 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2369
2370 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2371 // Mark the old linear allocation area with a free space map so it can be
2372 // skipped when scanning the heap. This also puts it back in the free list
2373 // if it is big enough.
2374 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002375 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002376
2377 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2378 old_linear_size);
2379
2380 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002381 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2382 if (new_node == nullptr) return nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002383 owner_->AllocationStep(new_node->address(), size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002384
2385 int bytes_left = new_node_size - size_in_bytes;
2386 DCHECK(bytes_left >= 0);
2387
2388#ifdef DEBUG
2389 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2390 reinterpret_cast<Object**>(new_node->address())[i] =
2391 Smi::FromInt(kCodeZapValue);
2392 }
2393#endif
2394
2395 // The old-space-step might have finished sweeping and restarted marking.
2396 // Verify that it did not turn the page of the new node into an evacuation
2397 // candidate.
2398 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2399
2400 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2401
2402 // Memory in the linear allocation area is counted as allocated. We may free
2403 // a little of this again immediately - see below.
2404 owner_->Allocate(new_node_size);
2405
2406 if (owner_->heap()->inline_allocation_disabled()) {
 2407 // Keep the linear allocation area empty if requested to do so; just
 2408 // return the area to the free list instead.
2409 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2410 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2411 } else if (bytes_left > kThreshold &&
2412 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002413 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002414 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
 2415 // We don't want to give overly large linear areas to the allocator while
2416 // incremental marking is going on, because we won't check again whether
2417 // we want to do another increment until the linear area is used up.
2418 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2419 new_node_size - size_in_bytes - linear_size);
2420 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2421 new_node->address() + size_in_bytes + linear_size);
2422 } else if (bytes_left > 0) {
2423 // Normally we give the rest of the node to the allocator as its new
2424 // linear allocation area.
2425 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2426 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002427 }
2428
2429 return new_node;
2430}
2431
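// Unlinks and invalidates all of the page's free-list categories, returning
// the number of bytes that were available on them.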
Ben Murdochda12d292016-06-02 14:46:10 +01002432intptr_t FreeList::EvictFreeListItems(Page* page) {
2433 intptr_t sum = 0;
2434 page->ForAllFreeListCategories(
2435 [this, &sum, page](FreeListCategory* category) {
2436 DCHECK_EQ(this, category->owner());
2437 sum += category->available();
2438 RemoveCategory(category);
2439 category->Invalidate();
2440 });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002441 return sum;
2442}
2443
Ben Murdochda12d292016-06-02 14:46:10 +01002444bool FreeList::ContainsPageFreeListItems(Page* page) {
2445 bool contained = false;
2446 page->ForAllFreeListCategories(
2447 [this, &contained](FreeListCategory* category) {
2448 if (category->owner() == this && category->is_linked()) {
2449 contained = true;
2450 }
2451 });
2452 return contained;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002453}
2454
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002455void FreeList::RepairLists(Heap* heap) {
Ben Murdochda12d292016-06-02 14:46:10 +01002456 ForAllFreeListCategories(
2457 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2458}
2459
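// Categories of one type form a doubly linked list headed at
// categories_[type]. AddCategory() pushes a non-empty category to the front
// and returns false if it is empty or already at the head.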
2460bool FreeList::AddCategory(FreeListCategory* category) {
2461 FreeListCategoryType type = category->type_;
2462 FreeListCategory* top = categories_[type];
2463
2464 if (category->is_empty()) return false;
2465 if (top == category) return false;
2466
2467 // Common double-linked list insertion.
2468 if (top != nullptr) {
2469 top->set_prev(category);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002470 }
Ben Murdochda12d292016-06-02 14:46:10 +01002471 category->set_next(top);
2472 categories_[type] = category;
2473 return true;
2474}
2475
2476void FreeList::RemoveCategory(FreeListCategory* category) {
2477 FreeListCategoryType type = category->type_;
2478 FreeListCategory* top = categories_[type];
2479
2480 // Common double-linked list removal.
2481 if (top == category) {
2482 categories_[type] = category->next();
2483 }
2484 if (category->prev() != nullptr) {
2485 category->prev()->set_next(category->next());
2486 }
2487 if (category->next() != nullptr) {
2488 category->next()->set_prev(category->prev());
2489 }
2490 category->set_next(nullptr);
2491 category->set_prev(nullptr);
2492}
2493
2494void FreeList::PrintCategories(FreeListCategoryType type) {
2495 FreeListCategoryIterator it(this, type);
2496 PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
2497 while (it.HasNext()) {
2498 FreeListCategory* current = it.Next();
2499 PrintF("%p -> ", current);
2500 }
2501 PrintF("null\n");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002502}
2503
2504
2505#ifdef DEBUG
2506intptr_t FreeListCategory::SumFreeList() {
2507 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002508 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002509 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2511 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002512 cur = cur->next();
2513 }
2514 return sum;
2515}
2516
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002517int FreeListCategory::FreeListLength() {
2518 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002519 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002520 while (cur != NULL) {
2521 length++;
2522 cur = cur->next();
2523 if (length == kVeryLongFreeList) return length;
2524 }
2525 return length;
2526}
2527
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002528bool FreeList::IsVeryLong() {
Ben Murdochda12d292016-06-02 14:46:10 +01002529 int len = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002530 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002531 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2532 while (it.HasNext()) {
2533 len += it.Next()->FreeListLength();
2534 if (len >= FreeListCategory::kVeryLongFreeList) return true;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002535 }
2536 }
2537 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002538}
2539
2540
2541// This can take a very long time because it is linear in the number of entries
2542// on the free list, so it should not be called if FreeListLength returns
2543// kVeryLongFreeList.
2544intptr_t FreeList::SumFreeLists() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002545 intptr_t sum = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01002546 ForAllFreeListCategories(
2547 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002548 return sum;
2549}
2550#endif
2551
2552
2553// -----------------------------------------------------------------------------
2554// OldSpace implementation
2555
2556void PagedSpace::PrepareForMarkCompact() {
2557 // We don't have a linear allocation area while sweeping. It will be restored
2558 // on the first allocation after the sweep.
2559 EmptyAllocationInfo();
2560
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002561 // Clear the free list before a full GC; it will be rebuilt afterward.
2562 free_list_.Reset();
2563}
2564
2565
2566intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002567 const intptr_t size = Size() - (limit() - top());
2568 CHECK_GE(limit(), top());
2569 CHECK_GE(size, 0);
2570 USE(size);
2571 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002572}
2573
2574
 2575// After booting, we have created a map that represents free space on the
 2576// heap. If there was already a free list then the elements on it were
 2577// created with the wrong FreeSpaceMap (normally NULL), so we need to fix
 2578// them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002579void PagedSpace::RepairFreeListsAfterDeserialization() {
2580 free_list_.RepairLists(heap());
2581 // Each page may have a small free space that is not tracked by a free list.
2582 // Update the maps for those free space objects.
2583 PageIterator iterator(this);
2584 while (iterator.has_next()) {
2585 Page* page = iterator.next();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002586 int size = static_cast<int>(page->wasted_memory());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002587 if (size == 0) continue;
2588 Address address = page->OffsetToAddress(Page::kPageSize - size);
Ben Murdochda12d292016-06-02 14:46:10 +01002589 heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002590 }
2591}
2592
2593
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002594void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2595 if (allocation_info_.top() >= allocation_info_.limit()) return;
2596
2597 if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
2598 // Create filler object to keep page iterable if it was iterable.
2599 int remaining =
2600 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
Ben Murdochda12d292016-06-02 14:46:10 +01002601 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
2602 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002603 allocation_info_.Reset(nullptr, nullptr);
2604 }
2605}
2606
2607
2608HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002609 MarkCompactCollector* collector = heap()->mark_compact_collector();
2610 if (collector->sweeping_in_progress()) {
2611 // Wait for the sweeper threads here and complete the sweeping phase.
2612 collector->EnsureSweepingCompleted();
2613
2614 // After waiting for the sweeper threads, there may be new free-list
2615 // entries.
2616 return free_list_.Allocate(size_in_bytes);
2617 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002618 return nullptr;
2619}
2620
2621
2622HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2623 MarkCompactCollector* collector = heap()->mark_compact_collector();
2624 if (collector->sweeping_in_progress()) {
2625 collector->SweepAndRefill(this);
2626 return free_list_.Allocate(size_in_bytes);
2627 }
2628 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002629}
2630
2631
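// Slow-path allocation, tried in this order: refill the free list with the
// results of concurrent sweeping, sweep up to kMaxPagesToSweep pages on the
// main thread, wait for sweeping to finish if the old generation allocation
// limit has been reached, expand the space with a new page, and finally wait
// for sweeping as a last resort.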
2632HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002633 const int kMaxPagesToSweep = 1;
2634
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002635 // Allocation in this space has failed.
2636
2637 MarkCompactCollector* collector = heap()->mark_compact_collector();
2638 // Sweeping is still in progress.
2639 if (collector->sweeping_in_progress()) {
2640 // First try to refill the free-list, concurrent sweeper threads
2641 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002642 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002643
2644 // Retry the free list allocation.
2645 HeapObject* object = free_list_.Allocate(size_in_bytes);
2646 if (object != NULL) return object;
2647
2648 // If sweeping is still in progress try to sweep pages on the main thread.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002649 int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
2650 size_in_bytes, kMaxPagesToSweep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002651 RefillFreeList();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002652 if (max_freed >= size_in_bytes) {
2653 object = free_list_.Allocate(size_in_bytes);
2654 if (object != nullptr) return object;
2655 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002656 }
2657
2658 // Free list allocation failed and there is no next page. Fail if we have
2659 // hit the old generation size limit that should cause a garbage
2660 // collection.
2661 if (!heap()->always_allocate() &&
2662 heap()->OldGenerationAllocationLimitReached()) {
2663 // If sweeper threads are active, wait for them at that point and steal
 2664 // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002665 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2666 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002667 }
2668
2669 // Try to expand the space and allocate in the new next page.
2670 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002671 DCHECK((CountTotalPages() > 1) ||
2672 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002673 return free_list_.Allocate(size_in_bytes);
2674 }
2675
2676 // If sweeper threads are active, wait for them at that point and steal
 2677 // elements from their free-lists. Allocation may still fail, which
2678 // would indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002679 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002680}
2681
2682
2683#ifdef DEBUG
2684void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2685 CommentStatistic* comments_statistics =
2686 isolate->paged_space_comments_statistics();
2687 ReportCodeKindStatistics(isolate->code_kind_statistics());
2688 PrintF(
2689 "Code comment statistics (\" [ comment-txt : size/ "
2690 "count (average)\"):\n");
2691 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2692 const CommentStatistic& cs = comments_statistics[i];
2693 if (cs.size > 0) {
2694 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2695 cs.size / cs.count);
2696 }
2697 }
2698 PrintF("\n");
2699}
2700
2701
2702void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2703 CommentStatistic* comments_statistics =
2704 isolate->paged_space_comments_statistics();
2705 ClearCodeKindStatistics(isolate->code_kind_statistics());
2706 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2707 comments_statistics[i].Clear();
2708 }
2709 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2710 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2711 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2712}
2713
2714
 2715// Adds a comment to the 'comment_statistics' table. Performance is OK as
 2716// long as 'kMaxComments' is small.
2717static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2718 CommentStatistic* comments_statistics =
2719 isolate->paged_space_comments_statistics();
2720 // Do not count empty comments
2721 if (delta <= 0) return;
2722 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2723 // Search for a free or matching entry in 'comments_statistics': 'cs'
2724 // points to result.
2725 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2726 if (comments_statistics[i].comment == NULL) {
2727 cs = &comments_statistics[i];
2728 cs->comment = comment;
2729 break;
2730 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2731 cs = &comments_statistics[i];
2732 break;
2733 }
2734 }
2735 // Update entry for 'comment'
2736 cs->size += delta;
2737 cs->count += 1;
2738}
2739
2740
2741// Called for each nested comment start (start marked with '[ xxx', end
2742// marked with ']'). RelocIterator 'it' must point to a comment reloc info.
2743static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2744 DCHECK(!it->done());
2745 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2746 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2747 if (tmp[0] != '[') {
2748 // Not a nested comment; skip
2749 return;
2750 }
2751
2752 // Search for end of nested comment or a new nested comment
2753 const char* const comment_txt =
2754 reinterpret_cast<const char*>(it->rinfo()->data());
2755 const byte* prev_pc = it->rinfo()->pc();
2756 int flat_delta = 0;
2757 it->next();
2758 while (true) {
2759    // All nested comments must be terminated properly, so this loop is
2760    // guaranteed to exit.
2761 DCHECK(!it->done());
2762 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2763 const char* const txt =
2764 reinterpret_cast<const char*>(it->rinfo()->data());
2765 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2766 if (txt[0] == ']') break; // End of nested comment
2767 // A new comment
2768 CollectCommentStatistics(isolate, it);
2769      // Skip code that was covered by the previous comment.
2770 prev_pc = it->rinfo()->pc();
2771 }
2772 it->next();
2773 }
2774 EnterComment(isolate, comment_txt, flat_delta);
2775}
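// Worked example for the recursion above, using a made-up comment layout:
//
//   [ outer          <- CollectCommentStatistics() entered here
//     <code A>       <- bytes counted into flat_delta for "[ outer"
//     [ inner
//       <code B>     <- attributed to "[ inner" by the recursive call
//     ]
//     <code C>       <- counted into "[ outer" again
//   ]
//
// Only bytes not covered by a nested comment end up in the EnterComment()
// call for the enclosing comment text.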
2776
2777
2778// Collects code size statistics:
2779// - by code kind
2780// - by code comment
2781void PagedSpace::CollectCodeStatistics() {
2782 Isolate* isolate = heap()->isolate();
2783 HeapObjectIterator obj_it(this);
2784 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Ben Murdochda12d292016-06-02 14:46:10 +01002785 if (obj->IsAbstractCode()) {
2786 AbstractCode* code = AbstractCode::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002787 isolate->code_kind_statistics()[code->kind()] += code->Size();
Ben Murdochda12d292016-06-02 14:46:10 +01002788 }
2789 if (obj->IsCode()) {
2790 // TODO(mythria): Also enable this for BytecodeArray when it supports
2791 // RelocInformation.
2792 Code* code = Code::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002793 RelocIterator it(code);
2794 int delta = 0;
2795 const byte* prev_pc = code->instruction_start();
2796 while (!it.done()) {
2797 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2798 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2799 CollectCommentStatistics(isolate, &it);
2800 prev_pc = it.rinfo()->pc();
2801 }
2802 it.next();
2803 }
2804
2805 DCHECK(code->instruction_start() <= prev_pc &&
2806 prev_pc <= code->instruction_end());
2807 delta += static_cast<int>(code->instruction_end() - prev_pc);
2808 EnterComment(isolate, "NoComment", delta);
2809 }
2810 }
2811}
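// A minimal usage sketch (debug builds only; the call site below is an
// assumption, not something this file prescribes):
//
//   heap->code_space()->ResetCodeStatistics(isolate);
//   heap->code_space()->CollectCodeStatistics();
//   heap->lo_space()->CollectCodeStatistics();
//   heap->code_space()->ReportCodeStatistics(isolate);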
2812
2813
2814void PagedSpace::ReportStatistics() {
2815 int pct = static_cast<int>(Available() * 100 / Capacity());
2816 PrintF(" capacity: %" V8_PTR_PREFIX
2817 "d"
2818 ", waste: %" V8_PTR_PREFIX
2819 "d"
2820 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2821 Capacity(), Waste(), Available(), pct);
2822
2823 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2824 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2825 }
2826 ClearHistograms(heap()->isolate());
2827 HeapObjectIterator obj_it(this);
2828 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2829 CollectHistogramInfo(obj);
2830 ReportHistogram(heap()->isolate(), true);
2831}
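// Example of the summary line printed above (numbers are invented):
//
//     capacity: 2097152, waste: 1024, available: 524288, %25
//
// followed by the per-type histogram emitted by ReportHistogram().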
2832#endif
2833
2834
2835// -----------------------------------------------------------------------------
2836// MapSpace implementation
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002837
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002838#ifdef VERIFY_HEAP
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002839void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002840#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002841
2842
2843// -----------------------------------------------------------------------------
2844// LargeObjectIterator
2845
2846LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2847 current_ = space->first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002848}
2849
2850
2851HeapObject* LargeObjectIterator::Next() {
2852 if (current_ == NULL) return NULL;
2853
2854 HeapObject* object = current_->GetObject();
2855 current_ = current_->next_page();
2856 return object;
2857}
2858
2859
2860// -----------------------------------------------------------------------------
2861// LargeObjectSpace
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002862
2863
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002864LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002865 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002866 first_page_(NULL),
2867 size_(0),
2868 page_count_(0),
2869 objects_size_(0),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002870 chunk_map_(HashMap::PointersMatch, 1024) {}
2871
2872
2873LargeObjectSpace::~LargeObjectSpace() {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002874
2875
2876bool LargeObjectSpace::SetUp() {
2877 first_page_ = NULL;
2878 size_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002879 page_count_ = 0;
2880 objects_size_ = 0;
2881 chunk_map_.Clear();
2882 return true;
2883}
2884
2885
2886void LargeObjectSpace::TearDown() {
2887 while (first_page_ != NULL) {
2888 LargePage* page = first_page_;
2889 first_page_ = first_page_->next_page();
2890 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2891
2892 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2893 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2894 space, kAllocationActionFree, page->size());
2895 heap()->isolate()->memory_allocator()->Free(page);
2896 }
2897 SetUp();
2898}
2899
2900
2901AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2902 Executability executable) {
2903 // Check if we want to force a GC before growing the old space further.
2904 // If so, fail the allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002905 if (!heap()->CanExpandOldGeneration(object_size)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002906 return AllocationResult::Retry(identity());
2907 }
2908
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002909 LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
2910 object_size, this, executable);
2911 if (page == NULL) return AllocationResult::Retry(identity());
2912 DCHECK(page->area_size() >= object_size);
2913
2914 size_ += static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002915 AccountCommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002916 objects_size_ += object_size;
2917 page_count_++;
2918 page->set_next_page(first_page_);
2919 first_page_ = page;
2920
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002921 // Register all MemoryChunk::kAlignment-aligned chunks covered by
2922 // this large page in the chunk map.
2923 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
2924 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
2925 for (uintptr_t key = base; key <= limit; key++) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002926 HashMap::Entry* entry = chunk_map_.LookupOrInsert(
2927 reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002928 DCHECK(entry != NULL);
2929 entry->value = page;
2930 }
2931
2932 HeapObject* object = page->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002933 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
2934
2935 if (Heap::ShouldZapGarbage()) {
2936 // Make the object consistent so the heap can be verified in OldSpaceStep.
2937 // We only need to do this in debug builds or if verify_heap is on.
2938 reinterpret_cast<Object**>(object->address())[0] =
2939 heap()->fixed_array_map();
2940 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
2941 }
2942
2943 heap()->incremental_marking()->OldSpaceStep(object_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002944 AllocationStep(object->address(), object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002945 return object;
2946}
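// Worked example for the chunk-map registration above (the addresses and the
// 1 MB alignment are assumptions for illustration only): a large page at
// 0x4000000 with size 3 MB yields base = 0x40 and limit = 0x42, so the keys
// 0x40, 0x41 and 0x42 all map back to this LargePage. FindPage() later
// recovers the page from any interior address with the same division.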
2947
2948
2949size_t LargeObjectSpace::CommittedPhysicalMemory() {
2950 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2951 size_t size = 0;
2952 LargePage* current = first_page_;
2953 while (current != NULL) {
2954 size += current->CommittedPhysicalMemory();
2955 current = current->next_page();
2956 }
2957 return size;
2958}
2959
2960
2961// GC support
2962Object* LargeObjectSpace::FindObject(Address a) {
2963 LargePage* page = FindPage(a);
2964 if (page != NULL) {
2965 return page->GetObject();
2966 }
2967 return Smi::FromInt(0); // Signaling not found.
2968}
2969
2970
2971LargePage* LargeObjectSpace::FindPage(Address a) {
2972 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
2973 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002974 static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002975 if (e != NULL) {
2976 DCHECK(e->value != NULL);
2977 LargePage* page = reinterpret_cast<LargePage*>(e->value);
2978 DCHECK(page->is_valid());
2979 if (page->Contains(a)) {
2980 return page;
2981 }
2982 }
2983 return NULL;
2984}
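// Lookup sketch matching the registration done in AllocateRaw(); the variable
// names are illustrative:
//
//   LargePage* page = lo_space->FindPage(addr);  // key = addr / kAlignment
//   if (page != NULL) {
//     HeapObject* object = page->GetObject();    // what FindObject() returns
//   }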
2985
2986
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002987void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
2988 LargePage* current = first_page_;
2989 while (current != NULL) {
2990 HeapObject* object = current->GetObject();
2991 MarkBit mark_bit = Marking::MarkBitFrom(object);
2992 DCHECK(Marking::IsBlack(mark_bit));
2993 Marking::BlackToWhite(mark_bit);
2994 Page::FromAddress(object->address())->ResetProgressBar();
2995 Page::FromAddress(object->address())->ResetLiveBytes();
2996 current = current->next_page();
2997 }
2998}
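// Note on the loop above: a large page holds exactly one object, so clearing
// that object's mark bit plus resetting the page's progress bar and live
// bytes fully resets the page's marking state.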
2999
3000
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003001void LargeObjectSpace::FreeUnmarkedObjects() {
3002 LargePage* previous = NULL;
3003 LargePage* current = first_page_;
3004 while (current != NULL) {
3005 HeapObject* object = current->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003006 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003007 DCHECK(!Marking::IsGrey(mark_bit));
3008 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003009 previous = current;
3010 current = current->next_page();
3011 } else {
3012 LargePage* page = current;
3013 // Cut the chunk out from the chunk list.
3014 current = current->next_page();
3015 if (previous == NULL) {
3016 first_page_ = current;
3017 } else {
3018 previous->set_next_page(current);
3019 }
3020
3021 // Free the chunk.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003022 size_ -= static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003023 AccountUncommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003024 objects_size_ -= object->Size();
3025 page_count_--;
3026
3027 // Remove entries belonging to this page.
3028      // Use a local 'alignment' variable so the lines below stay within
3029      // the 80-character limit checked by tools/presubmit.py.
3030 const intptr_t alignment = MemoryChunk::kAlignment;
3031 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3032 uintptr_t limit = base + (page->size() - 1) / alignment;
3033 for (uintptr_t key = base; key <= limit; key++) {
3034 chunk_map_.Remove(reinterpret_cast<void*>(key),
3035 static_cast<uint32_t>(key));
3036 }
3037
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003038 heap()->QueueMemoryChunkForFree(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003039 }
3040 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003041}
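// Note on the unlinking above: freeing an unmarked large object means
// removing its page from the linked list, dropping the same base..limit
// range of chunk-map keys that AllocateRaw() registered, and handing the
// page to QueueMemoryChunkForFree() so the memory is released later.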
3042
3043
3044bool LargeObjectSpace::Contains(HeapObject* object) {
3045 Address address = object->address();
3046 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3047
3048 bool owned = (chunk->owner() == this);
3049
3050 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3051
3052 return owned;
3053}
3054
3055
3056#ifdef VERIFY_HEAP
3057// We do not assume that the large object iterator works, because it depends
3058// on the invariants we are checking during verification.
3059void LargeObjectSpace::Verify() {
3060 for (LargePage* chunk = first_page_; chunk != NULL;
3061 chunk = chunk->next_page()) {
3062 // Each chunk contains an object that starts at the large object page's
3063 // object area start.
3064 HeapObject* object = chunk->GetObject();
3065 Page* page = Page::FromAddress(object->address());
3066 CHECK(object->address() == page->area_start());
3067
3068 // The first word should be a map, and we expect all map pointers to be
3069 // in map space.
3070 Map* map = object->map();
3071 CHECK(map->IsMap());
3072 CHECK(heap()->map_space()->Contains(map));
3073
3074 // We have only code, sequential strings, external strings
3075 // (sequential strings that have been morphed into external
3076    // strings), fixed arrays, fixed double arrays, and byte arrays in the
3077 // large object space.
Ben Murdochda12d292016-06-02 14:46:10 +01003078 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003079 object->IsExternalString() || object->IsFixedArray() ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003080 object->IsFixedDoubleArray() || object->IsByteArray());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003081
3082 // The object itself should look OK.
3083 object->ObjectVerify();
3084
3085 // Byte arrays and strings don't have interior pointers.
Ben Murdochda12d292016-06-02 14:46:10 +01003086 if (object->IsAbstractCode()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003087 VerifyPointersVisitor code_visitor;
3088 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3089 } else if (object->IsFixedArray()) {
3090 FixedArray* array = FixedArray::cast(object);
3091 for (int j = 0; j < array->length(); j++) {
3092 Object* element = array->get(j);
3093 if (element->IsHeapObject()) {
3094 HeapObject* element_object = HeapObject::cast(element);
3095 CHECK(heap()->Contains(element_object));
3096 CHECK(element_object->map()->IsMap());
3097 }
3098 }
3099 }
3100 }
3101}
3102#endif
3103
3104
3105#ifdef DEBUG
3106void LargeObjectSpace::Print() {
3107 OFStream os(stdout);
3108 LargeObjectIterator it(this);
3109 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3110 obj->Print(os);
3111 }
3112}
3113
3114
3115void LargeObjectSpace::ReportStatistics() {
3116 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3117 int num_objects = 0;
3118 ClearHistograms(heap()->isolate());
3119 LargeObjectIterator it(this);
3120 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3121 num_objects++;
3122 CollectHistogramInfo(obj);
3123 }
3124
3125 PrintF(
3126 " number of objects %d, "
3127 "size of objects %" V8_PTR_PREFIX "d\n",
3128 num_objects, objects_size_);
3129 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3130}
3131
3132
3133void LargeObjectSpace::CollectCodeStatistics() {
3134 Isolate* isolate = heap()->isolate();
3135 LargeObjectIterator obj_it(this);
3136 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
Ben Murdochda12d292016-06-02 14:46:10 +01003137 if (obj->IsAbstractCode()) {
3138 AbstractCode* code = AbstractCode::cast(obj);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003139 isolate->code_kind_statistics()[code->kind()] += code->Size();
3140 }
3141 }
3142}
3143
3144
3145void Page::Print() {
3146 // Make a best-effort to print the objects in the page.
3147 PrintF("Page@%p in %s\n", this->address(),
3148 AllocationSpaceName(this->owner()->identity()));
3149 printf(" --------------------------------------\n");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003150 HeapObjectIterator objects(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003151 unsigned mark_size = 0;
3152 for (HeapObject* object = objects.Next(); object != NULL;
3153 object = objects.Next()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003154 bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003155 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3156 if (is_marked) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003157 mark_size += object->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003158 }
3159 object->ShortPrint();
3160 PrintF("\n");
3161 }
3162 printf(" --------------------------------------\n");
3163 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3164}
3165
3166#endif // DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003167} // namespace internal
3168} // namespace v8