// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->SweepingDone());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the page and should advance to the next block of
// objects.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
      cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}
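
// Illustrative usage (mirroring PagedSpace::FindObject further down in this
// file): walk every object on a single, already-swept page.
//
//   HeapObjectIterator it(page);
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }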

PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->ResumeAllocationObservers();
  }
}
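
// Illustrative usage: this is an RAII scope, so observers stay paused for
// exactly the lifetime of the local variable.
//
//   {
//     PauseAllocationObserversScope pause_observers(heap);
//     // ... allocate without notifying allocation observers ...
//   }  // observers are resumed here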

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
  // encoded immediate, the addresses have to be in range of 256Mb aligned
  // region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}


bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}
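
// Worked example of the coalescing above: free blocks [0x1000, +0x800) and
// [0x1800, +0x800) are adjacent (the second block starts exactly at
// start + size of the first), so they are merged into one allocation-list
// entry [0x1000, +0x1000). Blocks separated by a gap remain separate entries.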


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes the guard pages while commit_size does not. Make
  // sure callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}
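
// Note: the CHECK at the top of AllocateRawMemory encodes the invariant that
// requested_size covers the committed area plus both guard regions, i.e.
//   commit_size <= requested_size - 2 * CodePageGuardSize().
// Callers (see MemoryAllocator::AllocateChunk below) size their requests
// accordingly.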


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}


bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  for (MemoryChunk* chunk : chunk_pool_) {
    FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
               NOT_EXECUTABLE);
  }
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_.Value(), 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}


Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}

void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->old_to_new_slots_ = nullptr;
  chunk->old_to_old_slots_ = nullptr;
  chunk->typed_old_to_old_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->mutex_ = nullptr;
  chunk->available_in_free_list_ = 0;
  chunk->wasted_memory_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}
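
// Illustrative arithmetic for CommitArea (assuming a 4 KB commit page size and
// a non-executable chunk, so guard_size == 0): with a 1 KB header and a 7 KB
// area, committed_size is RoundUp(8 KB) == 8 KB. Requesting 11 KB yields
// commit_size == RoundUp(12 KB) == 12 KB, so one 4 KB block is committed at
// address() + 8 KB; requesting 3 KB instead yields commit_size == 4 KB, so the
// 4 KB block at address() + 4 KB is uncommitted.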


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}


void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
  available_in_free_list_ = 0;
}

LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}

template <MemoryAllocator::AllocationMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  if (mode == kRegular) {
    PreFreeMemory(chunk);
    PerformFreeMemory(chunk);
  } else {
    DCHECK_EQ(mode, kPooled);
    FreePooled(chunk);
  }
}

template void MemoryAllocator::Free<MemoryAllocator::kRegular>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPooled>(
    MemoryChunk* chunk);

template <typename PageType, MemoryAllocator::AllocationMode mode,
          typename SpaceType>
PageType* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                        Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (mode == kPooled) {
    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return PageType::Initialize(isolate_->heap(), chunk, executable, owner);
}

template Page* MemoryAllocator::AllocatePage<Page, MemoryAllocator::kRegular,
                                             PagedSpace>(intptr_t, PagedSpace*,
                                                         Executability);

template NewSpacePage* MemoryAllocator::AllocatePage<
    NewSpacePage, MemoryAllocator::kPooled, SemiSpace>(intptr_t, SemiSpace*,
                                                       Executability);

template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  if (chunk_pool_.is_empty()) return nullptr;
  const int size = MemoryChunk::kPageSize;
  MemoryChunk* chunk = chunk_pool_.RemoveLast();
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start = start + MemoryChunk::kObjectStartOffset;
  const Address area_end = start + size;
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  base::VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
  return chunk;
}

void MemoryAllocator::FreePooled(MemoryChunk* chunk) {
  DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
  DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
  chunk_pool_.Add(chunk);
  intptr_t chunk_size = static_cast<intptr_t>(chunk->size());
  if (chunk->executable() == EXECUTABLE) {
    size_executable_.Increment(-chunk_size);
  }
  size_.Increment(-chunk_size);
  UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
}
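
// Note on the chunk pool: FreePooled() uncommits a page-sized chunk but keeps
// its virtual address range reserved and parks the chunk in chunk_pool_;
// AllocatePagePooled() later re-commits such a chunk instead of reserving new
// memory. TearDown() releases whatever is still sitting in the pool.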

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}


bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}
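
// Note: the nested commits above realize the executable-chunk layout sketched
// in MemoryAllocator::AllocateChunk: a non-executable header, a guard page, an
// executable body, and a trailing guard page at the end of the reservation.
// Every failure path uncommits whatever was committed before returning false.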


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemory() {
  delete skip_list_;
  skip_list_ = nullptr;
  delete mutex_;
  mutex_ = nullptr;
  ReleaseOldToNewSlots();
  ReleaseOldToOldSlots();
}

static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK(pages > 0);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}
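
// AllocateSlotSet hands out one SlotSet per page-sized region of the chunk
// (rounding up), so a regular page gets exactly one set while a large-object
// chunk spanning several pages gets an array with one entry per page, each
// anchored at its own page start.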

void MemoryChunk::AllocateOldToNewSlots() {
  DCHECK(nullptr == old_to_new_slots_);
  old_to_new_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToNewSlots() {
  delete[] old_to_new_slots_;
  old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateOldToOldSlots() {
  DCHECK(nullptr == old_to_old_slots_);
  old_to_old_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToOldSlots() {
  delete[] old_to_old_slots_;
  old_to_old_slots_ = nullptr;
}

void MemoryChunk::AllocateTypedOldToOldSlots() {
  DCHECK(nullptr == typed_old_to_old_slots_);
  typed_old_to_old_slots_ = new TypedSlotSet(address());
}

void MemoryChunk::ReleaseTypedOldToOldSlots() {
  delete typed_old_to_old_slots_;
  typed_old_to_old_slots_ = nullptr;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);

void Space::AllocationStep(Address soon_object, int size) {
  if (!allocation_observers_paused_) {
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
    }
  }
}
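
// Registered observers are notified synchronously on every allocation step
// unless they have been paused via PauseAllocationObserversScope (see above).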

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable), free_list_(this) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  List<Page*>* swept_pages = collector->swept_pages(identity());
  intptr_t added = 0;
  {
    base::LockGuard<base::Mutex> guard(collector->swept_pages_mutex());
    for (int i = swept_pages->length() - 1; i >= 0; --i) {
      Page* p = (*swept_pages)[i];
      // Pages can actually change ownership only during compaction. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local() && (p->owner() != this)) {
        if (added > kCompactionMemoryWanted) break;
        base::LockGuard<base::Mutex> guard(
            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
        p->Unlink();
        p->set_owner(this);
        p->InsertAfter(anchor_.prev_page());
      }
      added += RelinkFreeListCategories(p);
      added += p->wasted_memory();
      swept_pages->Remove(i);
    }
  }
  accounting_stats_.IncreaseCapacity(added);
}
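
// Note: when this space is a compaction space (is_local()), the loop above may
// also steal swept pages that still belong to another space, re-owning them
// under that owner's mutex, but only until roughly kCompactionMemoryWanted
// bytes of free memory have been added.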

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  //   anchor_

  other->EmptyAllocationInfo();

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();

    // Relinking requires the category to be unlinked.
    other->UnlinkFreeListCategories(p);

    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
    RelinkFreeListCategories(p);
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand(size_t size) {
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());

  // Are we going to exceed capacity for this space? At this point we can be
  // way over the maximum size because of AlwaysAllocate scopes and large
  // objects.
  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;

  return true;
}


bool PagedSpace::Expand() {
  intptr_t size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!CanExpand(size)) return false;

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage<Page>(
      size, this, executable());
  if (p == NULL) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  // When incremental marking was activated, old space pages are allocated
  // black.
  if (heap()->incremental_marking()->black_allocation() &&
      identity() == OLD_SPACE) {
    Bitmap::SetAllBits(p);
    p->SetFlag(Page::BLACK_PAGE);
    if (FLAG_trace_incremental_marking) {
      PrintIsolate(heap()->isolate(), "Added black page %p\n", p);
    }
  }

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(AreaSize(), page->area_size());
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If the page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->QueueMemoryChunkForFree(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
          Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation

bool NewSpace::SetUp(int initial_semispace_capacity,
                     int maximum_semispace_capacity) {
  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetAllocationInfo();

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  allocation_info_.Reset(nullptr, nullptr);

  to_space_.TearDown();
  from_space_.TearDown();
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
1406
1407
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001408void LocalAllocationBuffer::Close() {
1409 if (IsValid()) {
1410 heap_->CreateFillerObjectAt(
1411 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001412 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1413 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001414 }
1415}
1416
1417
1418LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1419 AllocationInfo allocation_info)
1420 : heap_(heap), allocation_info_(allocation_info) {
1421 if (IsValid()) {
1422 heap_->CreateFillerObjectAt(
1423 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001424 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1425 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001426 }
1427}
1428
1429
1430LocalAllocationBuffer::LocalAllocationBuffer(
1431 const LocalAllocationBuffer& other) {
1432 *this = other;
1433}
1434
1435
1436LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1437 const LocalAllocationBuffer& other) {
1438 Close();
1439 heap_ = other.heap_;
1440 allocation_info_ = other.allocation_info_;
1441
1442 // This is needed since we (a) cannot yet use move-semantics, and (b) want
1443  // to make it easy to use the class as a value, and (c) implicitly call
1444 // {Close} upon copy.
1445 const_cast<LocalAllocationBuffer&>(other)
1446 .allocation_info_.Reset(nullptr, nullptr);
1447 return *this;
1448}
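// Usage sketch: since copy-assignment closes the destination and resets the
// source, a LocalAllocationBuffer behaves like a move-only value in practice:
//   LocalAllocationBuffer lab = ...;    // owns [top, limit)
//   LocalAllocationBuffer other = lab;  // 'other' now owns the range;
//                                       // 'lab' is reset to (nullptr, nullptr)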
1449
1450
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001451void NewSpace::UpdateAllocationInfo() {
1452 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001453 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001454 UpdateInlineAllocationLimit(0);
1455 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1456}
1457
1458
1459void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001460 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001461 to_space_.Reset();
1462 UpdateAllocationInfo();
1463 pages_used_ = 0;
1464 // Clear all mark-bits in the to-space.
1465 NewSpacePageIterator it(&to_space_);
1466 while (it.has_next()) {
1467 Bitmap::Clear(it.next());
1468 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001469 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001470}
1471
1472
1473void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1474 if (heap()->inline_allocation_disabled()) {
1475 // Lowest limit when linear allocation was disabled.
1476 Address high = to_space_.page_high();
1477 Address new_top = allocation_info_.top() + size_in_bytes;
1478 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001479 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001480 // Normal limit is the end of the current page.
1481 allocation_info_.set_limit(to_space_.page_high());
1482 } else {
1483 // Lower limit during incremental marking.
1484 Address high = to_space_.page_high();
1485 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001486 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001487 allocation_info_.set_limit(Min(new_limit, high));
1488 }
1489 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1490}
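// Note: when allocation observers are active, the limit chosen above is
//   new_limit = top + size_in_bytes + GetNextInlineAllocationStepSize() - 1,
// capped at the page end, so the first allocation that crosses the smallest
// observer step leaves the inline fast path and reaches InlineAllocationStep().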
1491
1492
1493bool NewSpace::AddFreshPage() {
1494 Address top = allocation_info_.top();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001495 DCHECK(!NewSpacePage::IsAtStart(top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001496 if (!to_space_.AdvancePage()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001497 // No more pages left to advance.
1498 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001499 }
1500
1501 // Clear remainder of current page.
1502 Address limit = NewSpacePage::FromLimit(top)->area_end();
1503 if (heap()->gc_state() == Heap::SCAVENGE) {
1504 heap()->promotion_queue()->SetNewLimit(limit);
1505 }
1506
1507 int remaining_in_page = static_cast<int>(limit - top);
Ben Murdochda12d292016-06-02 14:46:10 +01001508 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001509 pages_used_++;
1510 UpdateAllocationInfo();
1511
1512 return true;
1513}
1514
1515
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001516bool NewSpace::AddFreshPageSynchronized() {
1517 base::LockGuard<base::Mutex> guard(&mutex_);
1518 return AddFreshPage();
1519}
1520
1521
1522bool NewSpace::EnsureAllocation(int size_in_bytes,
1523 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001524 Address old_top = allocation_info_.top();
1525 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001526 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1527 int aligned_size_in_bytes = size_in_bytes + filler_size;
1528
1529 if (old_top + aligned_size_in_bytes >= high) {
1530 // Not enough room in the page, try to allocate a new one.
1531 if (!AddFreshPage()) {
1532 return false;
1533 }
1534
1535 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1536
1537 old_top = allocation_info_.top();
1538 high = to_space_.page_high();
1539 filler_size = Heap::GetFillToAlign(old_top, alignment);
1540 aligned_size_in_bytes = size_in_bytes + filler_size;
1541 }
1542
1543 DCHECK(old_top + aligned_size_in_bytes < high);
1544
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001545 if (allocation_info_.limit() < high) {
1546 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001547 // or because incremental marking wants to get a chance to do a step,
1548    // or because the idle scavenge job wants to get a chance to post a task.
1549 // Set the new limit accordingly.
1550 Address new_top = old_top + aligned_size_in_bytes;
1551 Address soon_object = old_top + filler_size;
1552 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1553 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1554 }
1555 return true;
1556}
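// Note: EnsureAllocation() reserves room for an optional alignment filler in
// front of the object:
//   filler_size           = Heap::GetFillToAlign(old_top, alignment);
//   aligned_size_in_bytes = size_in_bytes + filler_size;
//   soon_object           = old_top + filler_size;  // where the object starts
// so observers are told about the object address, not about the filler.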
1557
1558
1559void NewSpace::StartNextInlineAllocationStep() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001560 if (!allocation_observers_paused_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001561 top_on_previous_step_ =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001562 allocation_observers_->length() ? allocation_info_.top() : 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001563 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001564 }
1565}
1566
1567
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001568intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1569 intptr_t next_step = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001570 for (int i = 0; i < allocation_observers_->length(); ++i) {
1571 AllocationObserver* o = (*allocation_observers_)[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001572 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1573 : o->bytes_to_next_step();
1574 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001575 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001576 return next_step;
1577}
1578
Ben Murdoch097c5b22016-05-18 11:27:45 +01001579void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1580 Space::AddAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001581 StartNextInlineAllocationStep();
1582}
1583
Ben Murdoch097c5b22016-05-18 11:27:45 +01001584void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1585 Space::RemoveAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001586 StartNextInlineAllocationStep();
1587}
1588
Ben Murdoch097c5b22016-05-18 11:27:45 +01001589void NewSpace::PauseAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001590 // Do a step to account for memory allocated so far.
1591 InlineAllocationStep(top(), top(), nullptr, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001592 Space::PauseAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001593 top_on_previous_step_ = 0;
1594 UpdateInlineAllocationLimit(0);
1595}
1596
Ben Murdoch097c5b22016-05-18 11:27:45 +01001597void NewSpace::ResumeAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001598 DCHECK(top_on_previous_step_ == 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001599 Space::ResumeAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001600 StartNextInlineAllocationStep();
1601}
1602
1603
1604void NewSpace::InlineAllocationStep(Address top, Address new_top,
1605 Address soon_object, size_t size) {
1606 if (top_on_previous_step_) {
1607 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001608 for (int i = 0; i < allocation_observers_->length(); ++i) {
1609 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1610 size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001611 }
1612 top_on_previous_step_ = new_top;
1613 }
1614}
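// Note: the observer protocol used above is: register via
// NewSpace::AddAllocationObserver(); every step then reports
// AllocationStep(bytes_allocated, soon_object, size), and the next inline
// limit is derived from the smallest bytes_to_next_step() among the registered
// observers. PauseAllocationObservers() and ResumeAllocationObservers()
// bracket phases during which no steps are taken.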
1615
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001616#ifdef VERIFY_HEAP
1617// We do not use the SemiSpaceIterator because verification doesn't assume
1618// that it works (it depends on the invariants we are checking).
1619void NewSpace::Verify() {
1620 // The allocation pointer should be in the space or at the very end.
1621 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1622
1623 // There should be objects packed in from the low address up to the
1624 // allocation pointer.
1625 Address current = to_space_.first_page()->area_start();
1626 CHECK_EQ(current, to_space_.space_start());
1627
1628 while (current != top()) {
1629 if (!NewSpacePage::IsAtEnd(current)) {
1630 // The allocation pointer should not be in the middle of an object.
1631 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1632 current < top());
1633
1634 HeapObject* object = HeapObject::FromAddress(current);
1635
1636 // The first word should be a map, and we expect all map pointers to
1637 // be in map space.
1638 Map* map = object->map();
1639 CHECK(map->IsMap());
1640 CHECK(heap()->map_space()->Contains(map));
1641
1642 // The object should not be code or a map.
1643 CHECK(!object->IsMap());
Ben Murdochda12d292016-06-02 14:46:10 +01001644 CHECK(!object->IsAbstractCode());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001645
1646 // The object itself should look OK.
1647 object->ObjectVerify();
1648
1649 // All the interior pointers should be contained in the heap.
1650 VerifyPointersVisitor visitor;
1651 int size = object->Size();
1652 object->IterateBody(map->instance_type(), size, &visitor);
1653
1654 current += size;
1655 } else {
1656 // At end of page, switch to next page.
1657 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1658 // Next page should be valid.
1659 CHECK(!page->is_anchor());
1660 current = page->area_start();
1661 }
1662 }
1663
1664 // Check semi-spaces.
1665 CHECK_EQ(from_space_.id(), kFromSpace);
1666 CHECK_EQ(to_space_.id(), kToSpace);
1667 from_space_.Verify();
1668 to_space_.Verify();
1669}
1670#endif
1671
1672// -----------------------------------------------------------------------------
1673// SemiSpace implementation
1674
Ben Murdochda12d292016-06-02 14:46:10 +01001675void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001676 DCHECK_GE(maximum_capacity, Page::kPageSize);
1677 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1678 current_capacity_ = minimum_capacity_;
1679 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001680 committed_ = false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001681}
1682
1683
1684void SemiSpace::TearDown() {
Ben Murdochda12d292016-06-02 14:46:10 +01001685 // Properly uncommit memory to keep the allocator counters in sync.
1686 if (is_committed()) Uncommit();
1687 current_capacity_ = maximum_capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001688}
1689
1690
1691bool SemiSpace::Commit() {
1692 DCHECK(!is_committed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001693 NewSpacePage* current = anchor();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001694 const int num_pages = current_capacity_ / Page::kPageSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001695 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001696 NewSpacePage* new_page =
Ben Murdochda12d292016-06-02 14:46:10 +01001697 heap()
1698 ->isolate()
1699 ->memory_allocator()
1700 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1701 NewSpacePage::kAllocatableMemory, this, executable());
1702 if (new_page == nullptr) {
1703 RewindPages(current, pages_added);
1704 return false;
1705 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001706 new_page->InsertAfter(current);
1707 current = new_page;
1708 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001709 Reset();
Ben Murdochda12d292016-06-02 14:46:10 +01001710 AccountCommitted(current_capacity_);
1711 if (age_mark_ == nullptr) {
1712 age_mark_ = first_page()->area_start();
1713 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001714 committed_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001715 return true;
1716}
1717
1718
1719bool SemiSpace::Uncommit() {
1720 DCHECK(is_committed());
Ben Murdochda12d292016-06-02 14:46:10 +01001721 NewSpacePageIterator it(this);
1722 while (it.has_next()) {
1723 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
1724 it.next());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001725 }
1726 anchor()->set_next_page(anchor());
1727 anchor()->set_prev_page(anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001728 AccountUncommitted(current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001729 committed_ = false;
1730 return true;
1731}
1732
1733
1734size_t SemiSpace::CommittedPhysicalMemory() {
1735 if (!is_committed()) return 0;
1736 size_t size = 0;
1737 NewSpacePageIterator it(this);
1738 while (it.has_next()) {
1739 size += it.next()->CommittedPhysicalMemory();
1740 }
1741 return size;
1742}
1743
1744
1745bool SemiSpace::GrowTo(int new_capacity) {
1746 if (!is_committed()) {
1747 if (!Commit()) return false;
1748 }
Ben Murdochda12d292016-06-02 14:46:10 +01001749 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001750 DCHECK_LE(new_capacity, maximum_capacity_);
1751 DCHECK_GT(new_capacity, current_capacity_);
Ben Murdochda12d292016-06-02 14:46:10 +01001752 const int delta = new_capacity - current_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001753 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochda12d292016-06-02 14:46:10 +01001754 int delta_pages = delta / NewSpacePage::kPageSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001755 NewSpacePage* last_page = anchor()->prev_page();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001756 DCHECK_NE(last_page, anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001757 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001758 NewSpacePage* new_page =
Ben Murdochda12d292016-06-02 14:46:10 +01001759 heap()
1760 ->isolate()
1761 ->memory_allocator()
1762 ->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
1763 NewSpacePage::kAllocatableMemory, this, executable());
1764 if (new_page == nullptr) {
1765 RewindPages(last_page, pages_added);
1766 return false;
1767 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001768 new_page->InsertAfter(last_page);
1769 Bitmap::Clear(new_page);
1770    // Duplicate the flags that were set on the old page.
1771 new_page->SetFlags(last_page->GetFlags(),
1772 NewSpacePage::kCopyOnFlipFlagsMask);
1773 last_page = new_page;
1774 }
Ben Murdochda12d292016-06-02 14:46:10 +01001775 AccountCommitted(static_cast<intptr_t>(delta));
1776 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001777 return true;
1778}
1779
Ben Murdochda12d292016-06-02 14:46:10 +01001780void SemiSpace::RewindPages(NewSpacePage* start, int num_pages) {
1781 NewSpacePage* new_last_page = nullptr;
1782 NewSpacePage* last_page = start;
1783 while (num_pages > 0) {
1784 DCHECK_NE(last_page, anchor());
1785 new_last_page = last_page->prev_page();
1786 last_page->prev_page()->set_next_page(last_page->next_page());
1787 last_page->next_page()->set_prev_page(last_page->prev_page());
1788 last_page = new_last_page;
1789 num_pages--;
1790 }
1791}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001792
1793bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdochda12d292016-06-02 14:46:10 +01001794 DCHECK_EQ(new_capacity & NewSpacePage::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001795 DCHECK_GE(new_capacity, minimum_capacity_);
1796 DCHECK_LT(new_capacity, current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001797 if (is_committed()) {
Ben Murdochda12d292016-06-02 14:46:10 +01001798 const int delta = current_capacity_ - new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001799 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochda12d292016-06-02 14:46:10 +01001800 int delta_pages = delta / NewSpacePage::kPageSize;
1801 NewSpacePage* new_last_page;
1802 NewSpacePage* last_page;
1803 while (delta_pages > 0) {
1804 last_page = anchor()->prev_page();
1805 new_last_page = last_page->prev_page();
1806 new_last_page->set_next_page(anchor());
1807 anchor()->set_prev_page(new_last_page);
1808 heap()->isolate()->memory_allocator()->Free<MemoryAllocator::kPooled>(
1809 last_page);
1810 delta_pages--;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001811 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001812 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001813 }
Ben Murdochda12d292016-06-02 14:46:10 +01001814 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001815 return true;
1816}
1817
Ben Murdoch097c5b22016-05-18 11:27:45 +01001818void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001819 anchor_.set_owner(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001820 // Fixup back-pointers to anchor. Address of anchor changes when we swap.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001821 anchor_.prev_page()->set_next_page(&anchor_);
1822 anchor_.next_page()->set_prev_page(&anchor_);
1823
Ben Murdoch097c5b22016-05-18 11:27:45 +01001824 NewSpacePageIterator it(this);
1825 while (it.has_next()) {
1826 NewSpacePage* page = it.next();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001827 page->set_owner(this);
1828 page->SetFlags(flags, mask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001829 if (id_ == kToSpace) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001830 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1831 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1832 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1833 page->ResetLiveBytes();
1834 } else {
1835 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1836 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1837 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001838 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1839 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001840 }
1841}
1842
1843
1844void SemiSpace::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001845 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001846 current_page_ = anchor_.next_page();
1847}
1848
1849
1850void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1851 // We won't be swapping semispaces without data in them.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001852 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1853 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001854
Ben Murdoch097c5b22016-05-18 11:27:45 +01001855 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001856
Ben Murdoch097c5b22016-05-18 11:27:45 +01001857 // We swap all properties but id_.
1858 std::swap(from->current_capacity_, to->current_capacity_);
1859 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1860 std::swap(from->minimum_capacity_, to->minimum_capacity_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001861 std::swap(from->age_mark_, to->age_mark_);
1862 std::swap(from->committed_, to->committed_);
1863 std::swap(from->anchor_, to->anchor_);
1864 std::swap(from->current_page_, to->current_page_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001865
Ben Murdoch097c5b22016-05-18 11:27:45 +01001866 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
1867 from->FixPagesFlags(0, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001868}
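// Note: NewSpace::Flip() is a thin wrapper around Swap(). Everything except
// id_ is exchanged, after which FixPagesFlags() rewrites page ownership and
// the IN_TO_SPACE / IN_FROM_SPACE flags, re-applying the saved to-space flags
// under kCopyOnFlipFlagsMask so that the flags covered by the mask survive
// the flip.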
1869
1870
1871void SemiSpace::set_age_mark(Address mark) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001872 DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001873 age_mark_ = mark;
1874 // Mark all pages up to the one containing mark.
1875 NewSpacePageIterator it(space_start(), mark);
1876 while (it.has_next()) {
1877 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1878 }
1879}
1880
1881
1882#ifdef DEBUG
1883void SemiSpace::Print() {}
1884#endif
1885
1886#ifdef VERIFY_HEAP
1887void SemiSpace::Verify() {
1888 bool is_from_space = (id_ == kFromSpace);
1889 NewSpacePage* page = anchor_.next_page();
1890 CHECK(anchor_.semi_space() == this);
1891 while (page != &anchor_) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001892 CHECK_EQ(page->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001893 CHECK(page->InNewSpace());
1894 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1895 : MemoryChunk::IN_TO_SPACE));
1896 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1897 : MemoryChunk::IN_FROM_SPACE));
1898 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1899 if (!is_from_space) {
1900 // The pointers-from-here-are-interesting flag isn't updated dynamically
1901 // on from-space pages, so it might be out of sync with the marking state.
1902 if (page->heap()->incremental_marking()->IsMarking()) {
1903 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1904 } else {
1905 CHECK(
1906 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1907 }
1908 // TODO(gc): Check that the live_bytes_count_ field matches the
1909 // black marking on the page (if we make it match in new-space).
1910 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001911 CHECK_EQ(page->prev_page()->next_page(), page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001912 page = page->next_page();
1913 }
1914}
1915#endif
1916
1917#ifdef DEBUG
1918void SemiSpace::AssertValidRange(Address start, Address end) {
1919  // Addresses must belong to the same semi-space.
1920 NewSpacePage* page = NewSpacePage::FromLimit(start);
1921 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1922 SemiSpace* space = page->semi_space();
1923 CHECK_EQ(space, end_page->semi_space());
1924 // Start address is before end address, either on same page,
1925 // or end address is on a later page in the linked list of
1926 // semi-space pages.
1927 if (page == end_page) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001928 CHECK_LE(start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001929 } else {
1930 while (page != end_page) {
1931 page = page->next_page();
1932 CHECK_NE(page, space->anchor());
1933 }
1934 }
1935}
1936#endif
1937
1938
1939// -----------------------------------------------------------------------------
1940// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001941
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001942SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001943 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001944}
1945
1946
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001947void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001948 SemiSpace::AssertValidRange(start, end);
1949 current_ = start;
1950 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001951}
1952
1953
1954#ifdef DEBUG
1955// heap_histograms is shared, always clear it before using it.
1956static void ClearHistograms(Isolate* isolate) {
1957// We reset the name each time, though it hasn't changed.
1958#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1959 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1960#undef DEF_TYPE_NAME
1961
1962#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1963 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1964#undef CLEAR_HISTOGRAM
1965
1966 isolate->js_spill_information()->Clear();
1967}
1968
1969
1970static void ClearCodeKindStatistics(int* code_kind_statistics) {
1971 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1972 code_kind_statistics[i] = 0;
1973 }
1974}
1975
1976
1977static void ReportCodeKindStatistics(int* code_kind_statistics) {
1978 PrintF("\n Code kind histograms: \n");
1979 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1980 if (code_kind_statistics[i] > 0) {
1981 PrintF(" %-20s: %10d bytes\n",
1982 Code::Kind2String(static_cast<Code::Kind>(i)),
1983 code_kind_statistics[i]);
1984 }
1985 }
1986 PrintF("\n");
1987}
1988
1989
1990static int CollectHistogramInfo(HeapObject* obj) {
1991 Isolate* isolate = obj->GetIsolate();
1992 InstanceType type = obj->map()->instance_type();
1993 DCHECK(0 <= type && type <= LAST_TYPE);
1994 DCHECK(isolate->heap_histograms()[type].name() != NULL);
1995 isolate->heap_histograms()[type].increment_number(1);
1996 isolate->heap_histograms()[type].increment_bytes(obj->Size());
1997
1998 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
1999 JSObject::cast(obj)
2000 ->IncrementSpillStatistics(isolate->js_spill_information());
2001 }
2002
2003 return obj->Size();
2004}
2005
2006
2007static void ReportHistogram(Isolate* isolate, bool print_spill) {
2008 PrintF("\n Object Histogram:\n");
2009 for (int i = 0; i <= LAST_TYPE; i++) {
2010 if (isolate->heap_histograms()[i].number() > 0) {
2011 PrintF(" %-34s%10d (%10d bytes)\n",
2012 isolate->heap_histograms()[i].name(),
2013 isolate->heap_histograms()[i].number(),
2014 isolate->heap_histograms()[i].bytes());
2015 }
2016 }
2017 PrintF("\n");
2018
2019 // Summarize string types.
2020 int string_number = 0;
2021 int string_bytes = 0;
2022#define INCREMENT(type, size, name, camel_name) \
2023 string_number += isolate->heap_histograms()[type].number(); \
2024 string_bytes += isolate->heap_histograms()[type].bytes();
2025 STRING_TYPE_LIST(INCREMENT)
2026#undef INCREMENT
2027 if (string_number > 0) {
2028 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2029 string_bytes);
2030 }
2031
2032 if (FLAG_collect_heap_spill_statistics && print_spill) {
2033 isolate->js_spill_information()->Print();
2034 }
2035}
2036#endif // DEBUG
2037
2038
2039// Support for statistics gathering for --heap-stats and --log-gc.
2040void NewSpace::ClearHistograms() {
2041 for (int i = 0; i <= LAST_TYPE; i++) {
2042 allocated_histogram_[i].clear();
2043 promoted_histogram_[i].clear();
2044 }
2045}
2046
2047
2048// Because the copying collector does not touch garbage objects, we iterate
2049// the new space before a collection to get a histogram of allocated objects.
2050 // This only happens when the --log-gc flag is set.
2051void NewSpace::CollectStatistics() {
2052 ClearHistograms();
2053 SemiSpaceIterator it(this);
2054 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2055 RecordAllocation(obj);
2056}
2057
2058
2059static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2060 const char* description) {
2061 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2062 // Lump all the string types together.
2063 int string_number = 0;
2064 int string_bytes = 0;
2065#define INCREMENT(type, size, name, camel_name) \
2066 string_number += info[type].number(); \
2067 string_bytes += info[type].bytes();
2068 STRING_TYPE_LIST(INCREMENT)
2069#undef INCREMENT
2070 if (string_number > 0) {
2071 LOG(isolate,
2072 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2073 }
2074
2075 // Then do the other types.
2076 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2077 if (info[i].number() > 0) {
2078 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2079 info[i].bytes()));
2080 }
2081 }
2082 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2083}
2084
2085
2086void NewSpace::ReportStatistics() {
2087#ifdef DEBUG
2088 if (FLAG_heap_stats) {
2089 float pct = static_cast<float>(Available()) / TotalCapacity();
2090 PrintF(" capacity: %" V8_PTR_PREFIX
2091 "d"
2092 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2093 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2094 PrintF("\n Object Histogram:\n");
2095 for (int i = 0; i <= LAST_TYPE; i++) {
2096 if (allocated_histogram_[i].number() > 0) {
2097 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2098 allocated_histogram_[i].number(),
2099 allocated_histogram_[i].bytes());
2100 }
2101 }
2102 PrintF("\n");
2103 }
2104#endif // DEBUG
2105
2106 if (FLAG_log_gc) {
2107 Isolate* isolate = heap()->isolate();
2108 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2109 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2110 }
2111}
2112
2113
2114void NewSpace::RecordAllocation(HeapObject* obj) {
2115 InstanceType type = obj->map()->instance_type();
2116 DCHECK(0 <= type && type <= LAST_TYPE);
2117 allocated_histogram_[type].increment_number(1);
2118 allocated_histogram_[type].increment_bytes(obj->Size());
2119}
2120
2121
2122void NewSpace::RecordPromotion(HeapObject* obj) {
2123 InstanceType type = obj->map()->instance_type();
2124 DCHECK(0 <= type && type <= LAST_TYPE);
2125 promoted_histogram_[type].increment_number(1);
2126 promoted_histogram_[type].increment_bytes(obj->Size());
2127}
2128
2129
2130size_t NewSpace::CommittedPhysicalMemory() {
2131 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2132 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2133 size_t size = to_space_.CommittedPhysicalMemory();
2134 if (from_space_.is_committed()) {
2135 size += from_space_.CommittedPhysicalMemory();
2136 }
2137 return size;
2138}
2139
2140
2141// -----------------------------------------------------------------------------
2142// Free lists for old object spaces implementation
2143
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002144
2145void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002146 set_top(nullptr);
Ben Murdochda12d292016-06-02 14:46:10 +01002147 set_prev(nullptr);
2148 set_next(nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002149 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002150}
2151
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002152FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002153 DCHECK(page()->CanAllocate());
2154
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002155 FreeSpace* node = top();
2156 if (node == nullptr) return nullptr;
Ben Murdochda12d292016-06-02 14:46:10 +01002157 set_top(node->next());
2158 *node_size = node->Size();
2159 available_ -= *node_size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002160 return node;
2161}
2162
Ben Murdochda12d292016-06-02 14:46:10 +01002163FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
2164 int* node_size) {
2165 DCHECK(page()->CanAllocate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002166
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002167 FreeSpace* node = PickNodeFromList(node_size);
Ben Murdochda12d292016-06-02 14:46:10 +01002168 if ((node != nullptr) && (*node_size < minimum_size)) {
2169 Free(node, *node_size, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002170 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002171 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002172 }
2173 return node;
2174}
2175
Ben Murdochda12d292016-06-02 14:46:10 +01002176FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002177 int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002178 DCHECK(page()->CanAllocate());
2179
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002180 FreeSpace* prev_non_evac_node = nullptr;
2181 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2182 cur_node = cur_node->next()) {
2183 int size = cur_node->size();
Ben Murdochda12d292016-06-02 14:46:10 +01002184 if (size >= minimum_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002185 available_ -= size;
2186 if (cur_node == top()) {
2187 set_top(cur_node->next());
2188 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002189 if (prev_non_evac_node != nullptr) {
2190 prev_non_evac_node->set_next(cur_node->next());
2191 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002192 *node_size = size;
2193 return cur_node;
2194 }
2195
2196 prev_non_evac_node = cur_node;
2197 }
2198 return nullptr;
2199}
2200
Ben Murdochda12d292016-06-02 14:46:10 +01002201bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
2202 FreeMode mode) {
2203 if (!page()->CanAllocate()) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002204
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002205 free_space->set_next(top());
2206 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002207 available_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01002208 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2209 owner()->AddCategory(this);
2210 }
2211 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002212}
2213
2214
2215void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002217 while (n != NULL) {
2218 Map** map_location = reinterpret_cast<Map**>(n->address());
2219 if (*map_location == NULL) {
2220 *map_location = heap->free_space_map();
2221 } else {
2222 DCHECK(*map_location == heap->free_space_map());
2223 }
2224 n = n->next();
2225 }
2226}
2227
Ben Murdochda12d292016-06-02 14:46:10 +01002228void FreeListCategory::Relink() {
2229 DCHECK(!is_linked());
2230 owner()->AddCategory(this);
2231}
2232
2233void FreeListCategory::Invalidate() {
2234 page()->add_available_in_free_list(-available());
2235 Reset();
2236 type_ = kInvalidCategory;
2237}
2238
Ben Murdoch097c5b22016-05-18 11:27:45 +01002239FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2240 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002241 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002242 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002243 Reset();
2244}
2245
2246
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002247void FreeList::Reset() {
Ben Murdochda12d292016-06-02 14:46:10 +01002248 ForAllFreeListCategories(
2249 [](FreeListCategory* category) { category->Reset(); });
Ben Murdoch097c5b22016-05-18 11:27:45 +01002250 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002251 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002252 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002253 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002254}
2255
Ben Murdochda12d292016-06-02 14:46:10 +01002256int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002257 if (size_in_bytes == 0) return 0;
2258
Ben Murdochda12d292016-06-02 14:46:10 +01002259 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
2260 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002261
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002262 Page* page = Page::FromAddress(start);
2263
Ben Murdochda12d292016-06-02 14:46:10 +01002264 // Blocks have to be a minimum size to hold free list items.
2265 if (size_in_bytes < kMinBlockSize) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002266 page->add_wasted_memory(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002267 wasted_bytes_.Increment(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002268 return size_in_bytes;
2269 }
2270
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002271 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002272 // Insert other blocks at the head of a free list of the appropriate
2273 // magnitude.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002274 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002275 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2276 page->add_available_in_free_list(size_in_bytes);
2277 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002278 return 0;
2279}
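// Note: blocks smaller than kMinBlockSize are never linked into a category;
// Free() records them as wasted memory on the page and in wasted_bytes_, and
// returns their size so the caller knows those bytes are not available for
// free-list allocation.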
2280
Ben Murdochda12d292016-06-02 14:46:10 +01002281FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
2282 FreeListCategoryIterator it(this, type);
2283 FreeSpace* node = nullptr;
2284 while (it.HasNext()) {
2285 FreeListCategory* current = it.Next();
2286 node = current->PickNodeFromList(node_size);
2287 if (node != nullptr) {
2288 Page::FromAddress(node->address())
2289 ->add_available_in_free_list(-(*node_size));
2290 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2291 return node;
2292 }
2293 RemoveCategory(current);
2294 }
2295 return node;
2296}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002297
Ben Murdochda12d292016-06-02 14:46:10 +01002298FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
2299 int minimum_size) {
2300 if (categories_[type] == nullptr) return nullptr;
2301 FreeSpace* node =
2302 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002303 if (node != nullptr) {
2304 Page::FromAddress(node->address())
Ben Murdoch097c5b22016-05-18 11:27:45 +01002305 ->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002306 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2307 }
2308 return node;
2309}
2310
Ben Murdochda12d292016-06-02 14:46:10 +01002311FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2312 int* node_size, int minimum_size) {
2313 FreeListCategoryIterator it(this, type);
2314 FreeSpace* node = nullptr;
2315 while (it.HasNext()) {
2316 FreeListCategory* current = it.Next();
2317 node = current->SearchForNodeInList(minimum_size, node_size);
2318 if (node != nullptr) {
2319 Page::FromAddress(node->address())
2320 ->add_available_in_free_list(-(*node_size));
2321 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2322 return node;
2323 }
2324 }
2325 return node;
2326}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002327
2328FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2329 FreeSpace* node = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002330
Ben Murdoch097c5b22016-05-18 11:27:45 +01002331 // First try the allocation fast path: try to allocate the minimum element
2332 // size of a free list category. This operation is constant time.
2333 FreeListCategoryType type =
2334 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2335 for (int i = type; i < kHuge; i++) {
2336 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002337 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002338 }
2339
Ben Murdoch097c5b22016-05-18 11:27:45 +01002340 // Next search the huge list for free list nodes. This takes linear time in
2341 // the number of huge elements.
Ben Murdochda12d292016-06-02 14:46:10 +01002342 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002343 if (node != nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002344 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002345 return node;
2346 }
2347
Ben Murdoch097c5b22016-05-18 11:27:45 +01002348 // We need a huge block of memory, but we didn't find anything in the huge
2349 // list.
2350 if (type == kHuge) return nullptr;
2351
2352 // Now search the best fitting free list for a node that has at least the
Ben Murdochda12d292016-06-02 14:46:10 +01002353 // requested size.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002354 type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002355 node = TryFindNodeIn(type, node_size, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002356
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002357 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2358 return node;
2359}
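// Overview: FindNodeFor() tries three strategies in order of increasing cost:
//   1. a constant-time pop from any category whose minimum element size
//      already fits the request (FindNodeIn),
//   2. a linear scan of the kHuge list for a large enough node
//      (SearchForNodeInList),
//   3. a best-fit attempt in the exact category for the requested size
//      (TryFindNodeIn).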
2360
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002361// Allocation on the old space free list. If it succeeds then a new linear
2362// allocation space has been set up with the top and limit of the space. If
2363// the allocation fails then NULL is returned, and the caller can perform a GC
2364// or allocate a new page before retrying.
2365HeapObject* FreeList::Allocate(int size_in_bytes) {
2366 DCHECK(0 < size_in_bytes);
2367 DCHECK(size_in_bytes <= kMaxBlockSize);
2368 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2369 // Don't free list allocate if there is linear space available.
2370 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2371
2372 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2373 // Mark the old linear allocation area with a free space map so it can be
2374 // skipped when scanning the heap. This also puts it back in the free list
2375 // if it is big enough.
2376 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002377 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002378
2379 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2380 old_linear_size);
2381
2382 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002383 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2384 if (new_node == nullptr) return nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002385 owner_->AllocationStep(new_node->address(), size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002386
2387 int bytes_left = new_node_size - size_in_bytes;
2388 DCHECK(bytes_left >= 0);
2389
2390#ifdef DEBUG
2391 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2392 reinterpret_cast<Object**>(new_node->address())[i] =
2393 Smi::FromInt(kCodeZapValue);
2394 }
2395#endif
2396
2397 // The old-space-step might have finished sweeping and restarted marking.
2398 // Verify that it did not turn the page of the new node into an evacuation
2399 // candidate.
2400 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2401
2402 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2403
2404 // Memory in the linear allocation area is counted as allocated. We may free
2405 // a little of this again immediately - see below.
2406 owner_->Allocate(new_node_size);
2407
2408 if (owner_->heap()->inline_allocation_disabled()) {
2409 // Keep the linear allocation area empty if requested to do so, just
2410 // return area back to the free list instead.
2411 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2412 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2413 } else if (bytes_left > kThreshold &&
2414 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002415 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002416 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2417 // We don't want to give too large linear areas to the allocator while
2418 // incremental marking is going on, because we won't check again whether
2419 // we want to do another increment until the linear area is used up.
2420 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2421 new_node_size - size_in_bytes - linear_size);
2422 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2423 new_node->address() + size_in_bytes + linear_size);
2424 } else if (bytes_left > 0) {
2425 // Normally we give the rest of the node to the allocator as its new
2426 // linear allocation area.
2427 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2428 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002429 }
2430
2431 return new_node;
2432}
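// Overview: once a node is found, Allocate() handles the remainder
// (bytes_left) in one of three ways:
//   - inline allocation disabled: return the tail to the free list and keep
//     top/limit empty,
//   - incremental marking incomplete and bytes_left > kThreshold: keep only a
//     kThreshold-sized linear area and free the rest,
//   - otherwise: hand the whole remainder to the linear allocation area.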
2433
Ben Murdochda12d292016-06-02 14:46:10 +01002434intptr_t FreeList::EvictFreeListItems(Page* page) {
2435 intptr_t sum = 0;
2436 page->ForAllFreeListCategories(
2437 [this, &sum, page](FreeListCategory* category) {
2438 DCHECK_EQ(this, category->owner());
2439 sum += category->available();
2440 RemoveCategory(category);
2441 category->Invalidate();
2442 });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002443 return sum;
2444}
2445
Ben Murdochda12d292016-06-02 14:46:10 +01002446bool FreeList::ContainsPageFreeListItems(Page* page) {
2447 bool contained = false;
2448 page->ForAllFreeListCategories(
2449 [this, &contained](FreeListCategory* category) {
2450 if (category->owner() == this && category->is_linked()) {
2451 contained = true;
2452 }
2453 });
2454 return contained;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002455}
2456
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002457void FreeList::RepairLists(Heap* heap) {
Ben Murdochda12d292016-06-02 14:46:10 +01002458 ForAllFreeListCategories(
2459 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2460}
2461
2462bool FreeList::AddCategory(FreeListCategory* category) {
2463 FreeListCategoryType type = category->type_;
2464 FreeListCategory* top = categories_[type];
2465
2466 if (category->is_empty()) return false;
2467 if (top == category) return false;
2468
2469 // Common double-linked list insertion.
2470 if (top != nullptr) {
2471 top->set_prev(category);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002472 }
Ben Murdochda12d292016-06-02 14:46:10 +01002473 category->set_next(top);
2474 categories_[type] = category;
2475 return true;
2476}
2477
2478void FreeList::RemoveCategory(FreeListCategory* category) {
2479 FreeListCategoryType type = category->type_;
2480 FreeListCategory* top = categories_[type];
2481
2482 // Common double-linked list removal.
2483 if (top == category) {
2484 categories_[type] = category->next();
2485 }
2486 if (category->prev() != nullptr) {
2487 category->prev()->set_next(category->next());
2488 }
2489 if (category->next() != nullptr) {
2490 category->next()->set_prev(category->prev());
2491 }
2492 category->set_next(nullptr);
2493 category->set_prev(nullptr);
2494}
2495
2496void FreeList::PrintCategories(FreeListCategoryType type) {
2497 FreeListCategoryIterator it(this, type);
2498 PrintF("FreeList[%p, top=%p, %d] ", this, categories_[type], type);
2499 while (it.HasNext()) {
2500 FreeListCategory* current = it.Next();
2501 PrintF("%p -> ", current);
2502 }
2503 PrintF("null\n");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002504}
2505
2506
2507#ifdef DEBUG
2508intptr_t FreeListCategory::SumFreeList() {
2509 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002511 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002512 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2513 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002514 cur = cur->next();
2515 }
2516 return sum;
2517}
2518
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002519int FreeListCategory::FreeListLength() {
2520 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002521 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002522 while (cur != NULL) {
2523 length++;
2524 cur = cur->next();
2525 if (length == kVeryLongFreeList) return length;
2526 }
2527 return length;
2528}
2529
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002530bool FreeList::IsVeryLong() {
Ben Murdochda12d292016-06-02 14:46:10 +01002531 int len = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002532 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002533 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2534 while (it.HasNext()) {
2535 len += it.Next()->FreeListLength();
2536 if (len >= FreeListCategory::kVeryLongFreeList) return true;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002537 }
2538 }
2539 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002540}
2541
2542
2543// This can take a very long time because it is linear in the number of entries
2544// on the free list, so it should not be called if FreeListLength returns
2545// kVeryLongFreeList.
2546intptr_t FreeList::SumFreeLists() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002547 intptr_t sum = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01002548 ForAllFreeListCategories(
2549 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002550 return sum;
2551}
2552#endif
2553
2554
2555// -----------------------------------------------------------------------------
2556// OldSpace implementation
2557
2558void PagedSpace::PrepareForMarkCompact() {
2559 // We don't have a linear allocation area while sweeping. It will be restored
2560 // on the first allocation after the sweep.
2561 EmptyAllocationInfo();
2562
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002563 // Clear the free list before a full GC---it will be rebuilt afterward.
2564 free_list_.Reset();
2565}
2566
2567
2568intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002569 const intptr_t size = Size() - (limit() - top());
2570 CHECK_GE(limit(), top());
2571 CHECK_GE(size, 0);
2572 USE(size);
2573 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002574}
2575
2576
2577// After we have booted, we have created a map which represents free space
2578// on the heap. If there was already a free list then the elements on it
2579// were created with the wrong FreeSpaceMap (normally NULL), so we need to
2580// fix them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002581void PagedSpace::RepairFreeListsAfterDeserialization() {
2582 free_list_.RepairLists(heap());
2583 // Each page may have a small free space that is not tracked by a free list.
2584 // Update the maps for those free space objects.
2585 PageIterator iterator(this);
2586 while (iterator.has_next()) {
2587 Page* page = iterator.next();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002588 int size = static_cast<int>(page->wasted_memory());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002589 if (size == 0) continue;
2590 Address address = page->OffsetToAddress(Page::kPageSize - size);
Ben Murdochda12d292016-06-02 14:46:10 +01002591 heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002592 }
2593}
2594
2595
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002596void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2597 if (allocation_info_.top() >= allocation_info_.limit()) return;
2598
2599 if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
2600 // Create filler object to keep page iterable if it was iterable.
2601 int remaining =
2602 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
Ben Murdochda12d292016-06-02 14:46:10 +01002603 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
2604 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002605 allocation_info_.Reset(nullptr, nullptr);
2606 }
2607}
2608
2609
2610HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002611 MarkCompactCollector* collector = heap()->mark_compact_collector();
2612 if (collector->sweeping_in_progress()) {
2613 // Wait for the sweeper threads here and complete the sweeping phase.
2614 collector->EnsureSweepingCompleted();
2615
2616 // After waiting for the sweeper threads, there may be new free-list
2617 // entries.
2618 return free_list_.Allocate(size_in_bytes);
2619 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002620 return nullptr;
2621}
2622
2623
2624HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2625 MarkCompactCollector* collector = heap()->mark_compact_collector();
2626 if (collector->sweeping_in_progress()) {
2627 collector->SweepAndRefill(this);
2628 return free_list_.Allocate(size_in_bytes);
2629 }
2630 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002631}
2632
2633
2634HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002635 const int kMaxPagesToSweep = 1;
2636
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002637 // Allocation in this space has failed.
2638
2639 MarkCompactCollector* collector = heap()->mark_compact_collector();
2640 // Sweeping is still in progress.
2641 if (collector->sweeping_in_progress()) {
2642    // First try to refill the free-list; concurrent sweeper threads
2643 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002644 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002645
2646 // Retry the free list allocation.
2647 HeapObject* object = free_list_.Allocate(size_in_bytes);
2648 if (object != NULL) return object;
2649
2650 // If sweeping is still in progress try to sweep pages on the main thread.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002651 int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
2652 size_in_bytes, kMaxPagesToSweep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002653 RefillFreeList();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002654 if (max_freed >= size_in_bytes) {
2655 object = free_list_.Allocate(size_in_bytes);
2656 if (object != nullptr) return object;
2657 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002658 }
2659
2660 // Free list allocation failed and there is no next page. Fail if we have
2661 // hit the old generation size limit that should cause a garbage
2662 // collection.
2663 if (!heap()->always_allocate() &&
2664 heap()->OldGenerationAllocationLimitReached()) {
2665 // If sweeper threads are active, wait for them at that point and steal
2666    // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002667 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2668 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002669 }
2670
2671 // Try to expand the space and allocate in the new next page.
2672 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002673 DCHECK((CountTotalPages() > 1) ||
2674 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002675 return free_list_.Allocate(size_in_bytes);
2676 }
2677
2678 // If sweeper threads are active, wait for them at that point and steal
2679  // elements from their free-lists. Allocation may still fail then, which
2680 // would indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002681 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002682}
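// Overview: SlowAllocateRaw() escalates in this order: refill the free list
// from concurrent sweeping and retry, sweep up to kMaxPagesToSweep pages on
// the main thread and retry, stop at the old-generation limit (waiting for the
// sweepers for one last retry so that a GC can be triggered), otherwise expand
// the space by one page, and as a last resort wait for sweeping to complete
// and retry once more.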
2683
2684
2685#ifdef DEBUG
2686void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2687 CommentStatistic* comments_statistics =
2688 isolate->paged_space_comments_statistics();
2689 ReportCodeKindStatistics(isolate->code_kind_statistics());
2690 PrintF(
2691 "Code comment statistics (\" [ comment-txt : size/ "
2692 "count (average)\"):\n");
2693 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2694 const CommentStatistic& cs = comments_statistics[i];
2695 if (cs.size > 0) {
2696 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2697 cs.size / cs.count);
2698 }
2699 }
2700 PrintF("\n");
2701}
2702
2703
2704void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2705 CommentStatistic* comments_statistics =
2706 isolate->paged_space_comments_statistics();
2707 ClearCodeKindStatistics(isolate->code_kind_statistics());
2708 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2709 comments_statistics[i].Clear();
2710 }
2711 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2712 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2713 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2714}
2715
2716
2717// Adds comment to 'comment_statistics' table. Performance OK as long as
2718 // Adds a comment to the 'comment_statistics' table. Performance is OK as
2719 // long as 'kMaxComments' is small.
2720 CommentStatistic* comments_statistics =
2721 isolate->paged_space_comments_statistics();
2722 // Do not count empty comments
2723 if (delta <= 0) return;
2724 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2725 // Search for a free or matching entry in 'comments_statistics': 'cs'
2726 // points to result.
2727 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2728 if (comments_statistics[i].comment == NULL) {
2729 cs = &comments_statistics[i];
2730 cs->comment = comment;
2731 break;
2732 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2733 cs = &comments_statistics[i];
2734 break;
2735 }
2736 }
2737 // Update entry for 'comment'
2738 cs->size += delta;
2739 cs->count += 1;
2740}
2741
2742
// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, and therefore this
    // loop is guaranteed to exit.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(isolate, it);
      // Skip code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsAbstractCode()) {
      AbstractCode* code = AbstractCode::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
    if (obj->IsCode()) {
      // TODO(mythria): Also enable this for BytecodeArray when it supports
      // RelocInformation.
      Code* code = Code::cast(obj);
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


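// Dumps this space's capacity, waste, and availability, followed by a
// histogram of the live objects it contains. Sweeping is finished first so
// that the iteration below sees a fully swept, consistent space.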
void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" V8_PTR_PREFIX
         "d"
         ", waste: %" V8_PTR_PREFIX
         "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
    heap()->mark_compact_collector()->EnsureSweepingCompleted();
  }
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace


LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(HashMap::PointersMatch, 1024) {}


LargeObjectSpace::~LargeObjectSpace() {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


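// Allocates a dedicated LargePage for the requested object. Every
// MemoryChunk::kAlignment-aligned address covered by the new page is
// registered in chunk_map_, so FindPage() can later resolve any interior
// address back to its page with a single hash lookup.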
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  AccountCommitted(static_cast<intptr_t>(page->size()));
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  AllocationStep(object->address(), object_size);
  return object;
}


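// Sums the committed physical memory of every large page. On platforms
// without lazy commits this is simply the committed virtual memory.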
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}


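// Maps an arbitrary address to the LargePage containing it, if any, by
// probing chunk_map_ with the address divided by MemoryChunk::kAlignment
// (the same keys that AllocateRaw() registered for the page).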
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key));
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


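// Resets the marking state of all large objects that survived the GC: mark
// bits are cleared back to white and the per-page progress bar and live-byte
// counters are reset. Every object still on the page list is expected to be
// black here, as the DCHECK below asserts.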
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(Marking::IsBlack(mark_bit));
    Marking::BlackToWhite(mark_bit);
    Page::FromAddress(object->address())->ResetProgressBar();
    Page::FromAddress(object->address())->ResetLiveBytes();
    current = current->next_page();
  }
}


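// Releases every large page whose object was not marked live: the page is
// unlinked from the page list, the space counters are updated, its
// chunk_map_ entries are removed so FindPage() no longer resolves addresses
// into it, and the memory chunk is queued for freeing.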
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(!Marking::IsGrey(mark_bit));
    if (Marking::IsBlack(mark_bit)) {
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      size_ -= static_cast<int>(page->size());
      AccountUncommitted(static_cast<intptr_t>(page->size()));
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use variable alignment to help pass the length check (<= 80
      // characters) of a single line in tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      heap()->QueueMemoryChunkForFree(page);
    }
  }
}


bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings
    // (sequential strings that have been morphed into external
    // strings), fixed arrays, fixed double arrays, and byte arrays in the
    // large object space.
    CHECK(object->IsAbstractCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsAbstractCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      " number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsAbstractCode()) {
      AbstractCode* code = AbstractCode::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8