// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the doubly linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->WasSwept() || page->SweepingCompleted());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
      cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
  return true;
}


// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
  // encoded immediate, the addresses have to be in range of 256Mb aligned
  // region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

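// Finds an allocation block large enough for |requested| bytes, first by
// scanning forward in allocation_list_ and, failing that, by merging the
// free list back into the allocation list and rescanning. Returns false if
// the code range is full or too fragmented.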
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes guards while commit_size does not. Make sure
  // callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}

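// Carves a block of at least |requested_size| bytes off the front of the
// current allocation block and returns it in |block|. Fails if no
// sufficiently large block is available.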
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_.Value() == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeNewSpaceMemory(Address addr,
                                         base::VirtualMemory* reservation,
                                         Executability executable) {
  LOG(isolate_, DeleteEvent("NewSpace", addr));

  DCHECK(reservation->IsReserved());
  const intptr_t size = static_cast<intptr_t>(reservation->size());
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  FreeMemory(reservation, NOT_EXECUTABLE);
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}

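// Reserves (but does not commit) |size| bytes of virtual memory aligned to
// |alignment| and hands ownership of the reservation to |controller|.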
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}

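// Reserves |reserve_size| bytes of aligned virtual memory and commits the
// first |commit_size| bytes, adding guard pages when the memory is
// executable. Returns NULL and releases the reservation on failure.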
Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}


void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space);
  chunk->initialize_scan_on_scavenge(true);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}

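// Initializes the chunk header fields in place at |base|; the caller is
// responsible for having reserved and committed the underlying memory.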
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = NULL;
  chunk->skip_list_ = NULL;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
  chunk->parallel_compaction_state().SetValue(kCompactingDone);
  chunk->mutex_ = NULL;
  chunk->available_in_small_free_list_ = 0;
  chunk->available_in_medium_free_list_ = 0;
  chunk->available_in_large_free_list_ = 0;
  chunk->available_in_huge_free_list_ = 0;
  chunk->non_available_small_blocks_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->initialize_scan_on_scavenge(false);
  chunk->SetFlag(WAS_SWEPT);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //            Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  MemoryChunk* result = MemoryChunk::Initialize(
      heap, base, chunk_size, area_start, area_end, executable, owner);
  result->set_reserved_memory(&reservation);
  return result;
}


void Page::ResetFreeListStatistics() {
  non_available_small_blocks_ = 0;
  available_in_small_free_list_ = 0;
  available_in_medium_free_list_ = 0;
  available_in_large_free_list_ = 0;
  available_in_huge_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == NULL) return NULL;
  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}

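// First stage of freeing a chunk: runs allocation callbacks and updates
// allocator counters, then tags the chunk as PRE_FREED. The memory itself
// is released later by PerformFreeMemory().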
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  PreFreeMemory(chunk);
  PerformFreeMemory(chunk);
}


bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

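// Overwrites a block with kZapValue, one pointer-sized word at a time, so
// that use of freed or uninitialized memory is easier to detect in debugging.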
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}

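// Commits an executable chunk in pieces: a non-executable header, a guard
// page after the header, the executable body, and a trailing guard page.
// Any partial commits are undone on failure.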
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}


void MemoryChunk::ReleaseAllocatedMemory() {
  delete slots_buffer_;
  delete skip_list_;
  delete mutex_;
}


// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);


PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable),
      free_list_(this),
      end_of_unswept_pages_(NULL) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


void PagedSpace::AddMemory(Address start, intptr_t size) {
  accounting_stats_.ExpandSpace(static_cast<int>(size));
  Free(start, static_cast<int>(size));
}


FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
  FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
  if (free_space != nullptr) {
    accounting_stats_.DecreaseCapacity(free_space->size());
  }
  return free_space;
}

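// Distributes this space's free memory round-robin across the compaction
// spaces in |other| (one per collection, matching this space's identity)
// until every space holds at least |limit| bytes or the memory runs out.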
void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
                                            int num, intptr_t limit) {
  DCHECK_GT(num, 0);
  DCHECK(other != nullptr);

  if (limit == 0) limit = std::numeric_limits<intptr_t>::max();

  EmptyAllocationInfo();

  bool memory_available = true;
  bool spaces_need_memory = true;
  FreeSpace* node = nullptr;
  CompactionSpace* current_space = nullptr;
  // Iterate over spaces and memory as long as we have memory and there are
  // spaces in need of some.
  while (memory_available && spaces_need_memory) {
    spaces_need_memory = false;
    // Round-robin over all spaces.
    for (int i = 0; i < num; i++) {
      current_space = other[i]->Get(identity());
      if (current_space->free_list()->Available() < limit) {
        // Space has not reached its limit. Try to get some memory.
        spaces_need_memory = true;
        node = TryRemoveMemory(limit - current_space->free_list()->Available());
        if (node != nullptr) {
          CHECK(current_space->identity() == identity());
          current_space->AddMemory(node->address(), node->size());
        } else {
          memory_available = false;
          break;
        }
      }
    }
  }
}

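// Moves the memory collected by the sweeper for this space from the
// collector's per-space free list into the space's own free list.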
void PagedSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (this == heap()->old_space()) {
    free_list = collector->free_list_old_space().get();
  } else if (this == heap()->code_space()) {
    free_list = collector->free_list_code_space().get();
  } else if (this == heap()->map_space()) {
    free_list = collector->free_list_map_space().get();
  } else {
    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
    // generation spaces out.
    return;
  }
  DCHECK(free_list != nullptr);
  intptr_t added = free_list_.Concatenate(free_list);
  accounting_stats_.IncreaseCapacity(added);
}


void CompactionSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (identity() == OLD_SPACE) {
    free_list = collector->free_list_old_space().get();
  } else if (identity() == CODE_SPACE) {
    free_list = collector->free_list_code_space().get();
  } else {
    // Compaction spaces only represent old or code space.
    UNREACHABLE();
  }
  DCHECK(free_list != nullptr);
  intptr_t refilled = 0;
  while (refilled < kCompactionMemoryWanted) {
    FreeSpace* node =
        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
    if (node == nullptr) return;
    refilled += node->size();
    AddMemory(node->address(), node->size());
  }
}


void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
  DCHECK(identity() == other->identity());
  // Destroy the linear allocation space of {other}. This is needed to
  //   (a) not waste the memory and
  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
  other->EmptyAllocationInfo();

  // Move over the free list. Concatenate makes sure that the source free list
  // gets properly reset after moving over all nodes.
  intptr_t added = free_list_.Concatenate(other->free_list());

  // Moved memory is not recorded as allocated memory, but rather increases and
  // decreases capacity of the corresponding spaces.
  other->accounting_stats_.DecreaseCapacity(added);
  accounting_stats_.IncreaseCapacity(added);
}


void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  // Unmerged fields:
  //   area_size_
  //   anchor_

  MoveOverFreeMemory(other);

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  DCHECK(other->end_of_unswept_pages_ == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();
    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}


bool PagedSpace::ContainsSafe(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand(size_t size) {
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());

  // Are we going to exceed capacity for this space? At this point we can be
  // way over the maximum size because of AlwaysAllocate scopes and large
  // objects.
  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;

  return true;
}


bool PagedSpace::Expand() {
  intptr_t size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!CanExpand(size)) return false;

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}


void PagedSpace::ReleasePage(Page* page) {
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());

  if (page->WasSwept()) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    DCHECK_EQ(AreaSize(), static_cast<int>(size));
  }

  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
    heap()->decrement_scan_on_scavenge_pages();
    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
  }

  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->QueueMemoryChunkForFree(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->WasSwept());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  int target_semispace_capacity = heap()->TargetSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  DCHECK(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                  target_semispace_capacity, maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity, target_semispace_capacity,
                    maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;
  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
  object_mask_ = address_mask_ | kHeapObjectTagMask;
  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.Reset(nullptr, nullptr);


  to_space_.TearDown();
  from_space_.TearDown();

  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
      chunk_base_, &reservation_, NOT_EXECUTABLE);

  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


bool NewSpace::GrowOnePage() {
  if (TotalCapacity() == MaximumCapacity()) return false;
  int new_capacity = static_cast<int>(TotalCapacity()) + Page::kPageSize;
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space and the from space
    // is actually committed.
    if (from_space_.is_committed()) {
      if (!from_space_.GrowTo(new_capacity)) {
        // If we managed to grow to-space but couldn't grow from-space,
        // attempt to shrink to-space.
        if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
          // We are in an inconsistent state because we could not
          // commit/uncommit memory from new space.
          CHECK(false);
        }
        return false;
      }
    } else {
      if (!from_space_.SetTotalCapacity(new_capacity)) {
        // Can't really happen, but better safe than sorry.
        CHECK(false);
      }
    }
    DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
    return true;
  }
  return false;
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

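// Fills the unused tail of the buffer with a filler object so that the heap
// stays iterable after the buffer is abandoned.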
void LocalAllocationBuffer::Close() {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
  }
}


LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
                                             AllocationInfo allocation_info)
    : heap_(heap), allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
  }
}


LocalAllocationBuffer::LocalAllocationBuffer(
    const LocalAllocationBuffer& other) {
  *this = other;
}


LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    const LocalAllocationBuffer& other) {
  Close();
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

 1513  // This is needed since we (a) cannot yet use move-semantics, (b) want to
 1514  // make the class easy to use as a value type, and (c) want to implicitly
 1515  // call {Close} upon copy.
1516 const_cast<LocalAllocationBuffer&>(other)
1517 .allocation_info_.Reset(nullptr, nullptr);
1518 return *this;
1519}
1520
1521
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001522void NewSpace::UpdateAllocationInfo() {
1523 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001524 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001525 UpdateInlineAllocationLimit(0);
1526 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1527}
1528
1529
1530void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001531 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001532 to_space_.Reset();
1533 UpdateAllocationInfo();
1534 pages_used_ = 0;
1535 // Clear all mark-bits in the to-space.
1536 NewSpacePageIterator it(&to_space_);
1537 while (it.has_next()) {
1538 Bitmap::Clear(it.next());
1539 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001540 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001541}
1542
1543
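// Recomputes allocation_info_.limit(). Three cases, mirroring the branches
// below: inline allocation disabled (the limit tracks the requested size),
// allocation observers paused or absent (the limit is the page end), or
// observers active (the limit is lowered so the next step fires in time).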
1544void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1545 if (heap()->inline_allocation_disabled()) {
1546 // Lowest limit when linear allocation was disabled.
1547 Address high = to_space_.page_high();
1548 Address new_top = allocation_info_.top() + size_in_bytes;
1549 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001550 } else if (inline_allocation_observers_paused_ ||
1551 top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001552 // Normal limit is the end of the current page.
1553 allocation_info_.set_limit(to_space_.page_high());
1554 } else {
 1555    // Lower the limit so that inline allocation observers get a chance to step.
1556 Address high = to_space_.page_high();
1557 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001558 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001559 allocation_info_.set_limit(Min(new_limit, high));
1560 }
1561 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1562}
1563
1564
1565bool NewSpace::AddFreshPage() {
1566 Address top = allocation_info_.top();
1567 if (NewSpacePage::IsAtStart(top)) {
1568 // The current page is already empty. Don't try to make another.
1569
1570 // We should only get here if someone asks to allocate more
1571 // than what can be stored in a single page.
1572 // TODO(gc): Change the limit on new-space allocation to prevent this
1573 // from happening (all such allocations should go directly to LOSpace).
1574 return false;
1575 }
1576 if (!to_space_.AdvancePage()) {
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001577 // Check if we reached the target capacity yet. If not, try to commit a page
1578 // and continue.
1579 if ((to_space_.TotalCapacity() < to_space_.TargetCapacity()) &&
1580 GrowOnePage()) {
1581 if (!to_space_.AdvancePage()) {
1582 // It doesn't make sense that we managed to commit a page, but can't use
1583 // it.
1584 CHECK(false);
1585 }
1586 } else {
1587 // Failed to get a new page in to-space.
1588 return false;
1589 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001590 }
1591
1592 // Clear remainder of current page.
1593 Address limit = NewSpacePage::FromLimit(top)->area_end();
1594 if (heap()->gc_state() == Heap::SCAVENGE) {
1595 heap()->promotion_queue()->SetNewLimit(limit);
1596 }
1597
1598 int remaining_in_page = static_cast<int>(limit - top);
1599 heap()->CreateFillerObjectAt(top, remaining_in_page);
1600 pages_used_++;
1601 UpdateAllocationInfo();
1602
1603 return true;
1604}
1605
1606
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001607bool NewSpace::AddFreshPageSynchronized() {
1608 base::LockGuard<base::Mutex> guard(&mutex_);
1609 return AddFreshPage();
1610}
1611
1612
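// Makes sure the current to-space page has room for size_in_bytes at the
// requested alignment, advancing to a fresh page if necessary. Returns false
// when no fresh page could be obtained.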
1613bool NewSpace::EnsureAllocation(int size_in_bytes,
1614 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001615 Address old_top = allocation_info_.top();
1616 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001617 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1618 int aligned_size_in_bytes = size_in_bytes + filler_size;
1619
1620 if (old_top + aligned_size_in_bytes >= high) {
1621 // Not enough room in the page, try to allocate a new one.
1622 if (!AddFreshPage()) {
1623 return false;
1624 }
1625
1626 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1627
1628 old_top = allocation_info_.top();
1629 high = to_space_.page_high();
1630 filler_size = Heap::GetFillToAlign(old_top, alignment);
1631 aligned_size_in_bytes = size_in_bytes + filler_size;
1632 }
1633
1634 DCHECK(old_top + aligned_size_in_bytes < high);
1635
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001636 if (allocation_info_.limit() < high) {
1637 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001638 // or because incremental marking wants to get a chance to do a step,
 1639    // or because the idle scavenge job wants to get a chance to post a task.
1640 // Set the new limit accordingly.
1641 Address new_top = old_top + aligned_size_in_bytes;
1642 Address soon_object = old_top + filler_size;
1643 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1644 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1645 }
1646 return true;
1647}
1648
1649
1650void NewSpace::StartNextInlineAllocationStep() {
1651 if (!inline_allocation_observers_paused_) {
1652 top_on_previous_step_ =
1653 inline_allocation_observers_.length() ? allocation_info_.top() : 0;
1654 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001655 }
1656}
1657
1658
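// Returns the smallest bytes_to_next_step() over all registered observers,
// so that the inline allocation limit is lowered far enough for the earliest
// observer to fire.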
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001659intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1660 intptr_t next_step = 0;
1661 for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
1662 InlineAllocationObserver* o = inline_allocation_observers_[i];
1663 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1664 : o->bytes_to_next_step();
1665 }
1666 DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
1667 return next_step;
1668}
1669
1670
1671void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
1672 inline_allocation_observers_.Add(observer);
1673 StartNextInlineAllocationStep();
1674}
1675
1676
1677void NewSpace::RemoveInlineAllocationObserver(
1678 InlineAllocationObserver* observer) {
1679 bool removed = inline_allocation_observers_.RemoveElement(observer);
1680 // Only used in assertion. Suppress unused variable warning.
1681 static_cast<void>(removed);
1682 DCHECK(removed);
1683 StartNextInlineAllocationStep();
1684}
1685
1686
1687void NewSpace::PauseInlineAllocationObservers() {
1688 // Do a step to account for memory allocated so far.
1689 InlineAllocationStep(top(), top(), nullptr, 0);
1690 inline_allocation_observers_paused_ = true;
1691 top_on_previous_step_ = 0;
1692 UpdateInlineAllocationLimit(0);
1693}
1694
1695
1696void NewSpace::ResumeInlineAllocationObservers() {
1697 DCHECK(top_on_previous_step_ == 0);
1698 inline_allocation_observers_paused_ = false;
1699 StartNextInlineAllocationStep();
1700}
1701
1702
1703void NewSpace::InlineAllocationStep(Address top, Address new_top,
1704 Address soon_object, size_t size) {
1705 if (top_on_previous_step_) {
1706 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
1707 for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
1708 inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
1709 soon_object, size);
1710 }
1711 top_on_previous_step_ = new_top;
1712 }
1713}
1714
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001715#ifdef VERIFY_HEAP
1716// We do not use the SemiSpaceIterator because verification doesn't assume
1717// that it works (it depends on the invariants we are checking).
1718void NewSpace::Verify() {
1719 // The allocation pointer should be in the space or at the very end.
1720 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1721
1722 // There should be objects packed in from the low address up to the
1723 // allocation pointer.
1724 Address current = to_space_.first_page()->area_start();
1725 CHECK_EQ(current, to_space_.space_start());
1726
1727 while (current != top()) {
1728 if (!NewSpacePage::IsAtEnd(current)) {
1729 // The allocation pointer should not be in the middle of an object.
1730 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1731 current < top());
1732
1733 HeapObject* object = HeapObject::FromAddress(current);
1734
1735 // The first word should be a map, and we expect all map pointers to
1736 // be in map space.
1737 Map* map = object->map();
1738 CHECK(map->IsMap());
1739 CHECK(heap()->map_space()->Contains(map));
1740
1741 // The object should not be code or a map.
1742 CHECK(!object->IsMap());
1743 CHECK(!object->IsCode());
1744
1745 // The object itself should look OK.
1746 object->ObjectVerify();
1747
1748 // All the interior pointers should be contained in the heap.
1749 VerifyPointersVisitor visitor;
1750 int size = object->Size();
1751 object->IterateBody(map->instance_type(), size, &visitor);
1752
1753 current += size;
1754 } else {
1755 // At end of page, switch to next page.
1756 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1757 // Next page should be valid.
1758 CHECK(!page->is_anchor());
1759 current = page->area_start();
1760 }
1761 }
1762
1763 // Check semi-spaces.
1764 CHECK_EQ(from_space_.id(), kFromSpace);
1765 CHECK_EQ(to_space_.id(), kToSpace);
1766 from_space_.Verify();
1767 to_space_.Verify();
1768}
1769#endif
1770
1771// -----------------------------------------------------------------------------
1772// SemiSpace implementation
1773
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001774void SemiSpace::SetUp(Address start, int initial_capacity, int target_capacity,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001775 int maximum_capacity) {
1776 // Creates a space in the young generation. The constructor does not
1777 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1778 // memory of size 'capacity' when set up, and does not grow or shrink
1779 // otherwise. In the mark-compact collector, the memory region of the from
1780 // space is used as the marking stack. It requires contiguous memory
1781 // addresses.
1782 DCHECK(maximum_capacity >= Page::kPageSize);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001783 DCHECK(initial_capacity <= target_capacity);
1784 DCHECK(target_capacity <= maximum_capacity);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001785 initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1786 total_capacity_ = initial_capacity;
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001787 target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001788 maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001789 committed_ = false;
1790 start_ = start;
1791 address_mask_ = ~(maximum_capacity - 1);
1792 object_mask_ = address_mask_ | kHeapObjectTagMask;
1793 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001794 age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001795}
1796
1797
1798void SemiSpace::TearDown() {
1799 start_ = NULL;
1800 total_capacity_ = 0;
1801}
1802
1803
1804bool SemiSpace::Commit() {
1805 DCHECK(!is_committed());
1806 int pages = total_capacity_ / Page::kPageSize;
1807 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1808 start_, total_capacity_, executable())) {
1809 return false;
1810 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001811 AccountCommitted(total_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001812
1813 NewSpacePage* current = anchor();
1814 for (int i = 0; i < pages; i++) {
1815 NewSpacePage* new_page =
1816 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1817 new_page->InsertAfter(current);
1818 current = new_page;
1819 }
1820
1821 SetCapacity(total_capacity_);
1822 committed_ = true;
1823 Reset();
1824 return true;
1825}
1826
1827
1828bool SemiSpace::Uncommit() {
1829 DCHECK(is_committed());
1830 Address start = start_ + maximum_total_capacity_ - total_capacity_;
1831 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
1832 total_capacity_)) {
1833 return false;
1834 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001835 AccountUncommitted(total_capacity_);
1836
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001837 anchor()->set_next_page(anchor());
1838 anchor()->set_prev_page(anchor());
1839
1840 committed_ = false;
1841 return true;
1842}
1843
1844
1845size_t SemiSpace::CommittedPhysicalMemory() {
1846 if (!is_committed()) return 0;
1847 size_t size = 0;
1848 NewSpacePageIterator it(this);
1849 while (it.has_next()) {
1850 size += it.next()->CommittedPhysicalMemory();
1851 }
1852 return size;
1853}
1854
1855
1856bool SemiSpace::GrowTo(int new_capacity) {
1857 if (!is_committed()) {
1858 if (!Commit()) return false;
1859 }
1860 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1861 DCHECK(new_capacity <= maximum_total_capacity_);
1862 DCHECK(new_capacity > total_capacity_);
1863 int pages_before = total_capacity_ / Page::kPageSize;
1864 int pages_after = new_capacity / Page::kPageSize;
1865
1866 size_t delta = new_capacity - total_capacity_;
1867
1868 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1869 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1870 start_ + total_capacity_, delta, executable())) {
1871 return false;
1872 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001873 AccountCommitted(static_cast<intptr_t>(delta));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001874 SetCapacity(new_capacity);
1875 NewSpacePage* last_page = anchor()->prev_page();
1876 DCHECK(last_page != anchor());
1877 for (int i = pages_before; i < pages_after; i++) {
1878 Address page_address = start_ + i * Page::kPageSize;
1879 NewSpacePage* new_page =
1880 NewSpacePage::Initialize(heap(), page_address, this);
1881 new_page->InsertAfter(last_page);
1882 Bitmap::Clear(new_page);
 1883    // Duplicate the flags that were set on the old page.
1884 new_page->SetFlags(last_page->GetFlags(),
1885 NewSpacePage::kCopyOnFlipFlagsMask);
1886 last_page = new_page;
1887 }
1888 return true;
1889}
1890
1891
1892bool SemiSpace::ShrinkTo(int new_capacity) {
1893 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
1894 DCHECK(new_capacity >= initial_total_capacity_);
1895 DCHECK(new_capacity < total_capacity_);
1896 if (is_committed()) {
1897 size_t delta = total_capacity_ - new_capacity;
1898 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1899
1900 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1901 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1902 return false;
1903 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001904 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001905
1906 int pages_after = new_capacity / Page::kPageSize;
1907 NewSpacePage* new_last_page =
1908 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1909 new_last_page->set_next_page(anchor());
1910 anchor()->set_prev_page(new_last_page);
1911 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1912 }
1913
1914 SetCapacity(new_capacity);
1915
1916 return true;
1917}
1918
1919
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001920bool SemiSpace::SetTotalCapacity(int new_capacity) {
1921 CHECK(!is_committed());
1922 if (new_capacity >= initial_total_capacity_ &&
1923 new_capacity <= maximum_total_capacity_) {
1924 total_capacity_ = new_capacity;
1925 return true;
1926 }
1927 return false;
1928}
1929
1930
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001931void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1932 anchor_.set_owner(this);
1933 // Fixup back-pointers to anchor. Address of anchor changes
1934 // when we swap.
1935 anchor_.prev_page()->set_next_page(&anchor_);
1936 anchor_.next_page()->set_prev_page(&anchor_);
1937
1938 bool becomes_to_space = (id_ == kFromSpace);
1939 id_ = becomes_to_space ? kToSpace : kFromSpace;
1940 NewSpacePage* page = anchor_.next_page();
1941 while (page != &anchor_) {
1942 page->set_owner(this);
1943 page->SetFlags(flags, mask);
1944 if (becomes_to_space) {
1945 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1946 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1947 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1948 page->ResetLiveBytes();
1949 } else {
1950 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1951 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1952 }
1953 DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1954 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1955 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1956 page = page->next_page();
1957 }
1958}
1959
1960
1961void SemiSpace::Reset() {
1962 DCHECK(anchor_.next_page() != &anchor_);
1963 current_page_ = anchor_.next_page();
1964}
1965
1966
1967void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1968 // We won't be swapping semispaces without data in them.
1969 DCHECK(from->anchor_.next_page() != &from->anchor_);
1970 DCHECK(to->anchor_.next_page() != &to->anchor_);
1971
1972 // Swap bits.
1973 SemiSpace tmp = *from;
1974 *from = *to;
1975 *to = tmp;
1976
1977 // Fixup back-pointers to the page list anchor now that its address
1978 // has changed.
1979 // Swap to/from-space bits on pages.
1980 // Copy GC flags from old active space (from-space) to new (to-space).
1981 intptr_t flags = from->current_page()->GetFlags();
1982 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1983
1984 from->FlipPages(0, 0);
1985}
1986
1987
1988void SemiSpace::SetCapacity(int new_capacity) {
1989 total_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001990}
1991
1992
1993void SemiSpace::set_age_mark(Address mark) {
1994 DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
1995 age_mark_ = mark;
1996 // Mark all pages up to the one containing mark.
1997 NewSpacePageIterator it(space_start(), mark);
1998 while (it.has_next()) {
1999 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
2000 }
2001}
2002
2003
2004#ifdef DEBUG
2005void SemiSpace::Print() {}
2006#endif
2007
2008#ifdef VERIFY_HEAP
2009void SemiSpace::Verify() {
2010 bool is_from_space = (id_ == kFromSpace);
2011 NewSpacePage* page = anchor_.next_page();
2012 CHECK(anchor_.semi_space() == this);
2013 while (page != &anchor_) {
2014 CHECK(page->semi_space() == this);
2015 CHECK(page->InNewSpace());
2016 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
2017 : MemoryChunk::IN_TO_SPACE));
2018 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
2019 : MemoryChunk::IN_FROM_SPACE));
2020 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
2021 if (!is_from_space) {
2022 // The pointers-from-here-are-interesting flag isn't updated dynamically
2023 // on from-space pages, so it might be out of sync with the marking state.
2024 if (page->heap()->incremental_marking()->IsMarking()) {
2025 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2026 } else {
2027 CHECK(
2028 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
2029 }
2030 // TODO(gc): Check that the live_bytes_count_ field matches the
2031 // black marking on the page (if we make it match in new-space).
2032 }
2033 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
2034 CHECK(page->prev_page()->next_page() == page);
2035 page = page->next_page();
2036 }
2037}
2038#endif
2039
2040#ifdef DEBUG
2041void SemiSpace::AssertValidRange(Address start, Address end) {
 2042  // Addresses must belong to the same semi-space.
2043 NewSpacePage* page = NewSpacePage::FromLimit(start);
2044 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
2045 SemiSpace* space = page->semi_space();
2046 CHECK_EQ(space, end_page->semi_space());
 2047  // The start address is before the end address, either on the same page,
 2048  // or the end address is on a later page in the linked list of
2049 // semi-space pages.
2050 if (page == end_page) {
2051 CHECK(start <= end);
2052 } else {
2053 while (page != end_page) {
2054 page = page->next_page();
2055 CHECK_NE(page, space->anchor());
2056 }
2057 }
2058}
2059#endif
2060
2061
2062// -----------------------------------------------------------------------------
2063// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002064
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002065SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002066 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002067}
2068
2069
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002070void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002071 SemiSpace::AssertValidRange(start, end);
2072 current_ = start;
2073 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002074}
2075
2076
2077#ifdef DEBUG
 2078// heap_histograms is shared; always clear it before using it.
2079static void ClearHistograms(Isolate* isolate) {
2080// We reset the name each time, though it hasn't changed.
2081#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2082 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2083#undef DEF_TYPE_NAME
2084
2085#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2086 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2087#undef CLEAR_HISTOGRAM
2088
2089 isolate->js_spill_information()->Clear();
2090}
2091
2092
2093static void ClearCodeKindStatistics(int* code_kind_statistics) {
2094 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
2095 code_kind_statistics[i] = 0;
2096 }
2097}
2098
2099
2100static void ReportCodeKindStatistics(int* code_kind_statistics) {
2101 PrintF("\n Code kind histograms: \n");
2102 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
2103 if (code_kind_statistics[i] > 0) {
2104 PrintF(" %-20s: %10d bytes\n",
2105 Code::Kind2String(static_cast<Code::Kind>(i)),
2106 code_kind_statistics[i]);
2107 }
2108 }
2109 PrintF("\n");
2110}
2111
2112
2113static int CollectHistogramInfo(HeapObject* obj) {
2114 Isolate* isolate = obj->GetIsolate();
2115 InstanceType type = obj->map()->instance_type();
2116 DCHECK(0 <= type && type <= LAST_TYPE);
2117 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2118 isolate->heap_histograms()[type].increment_number(1);
2119 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2120
2121 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2122 JSObject::cast(obj)
2123 ->IncrementSpillStatistics(isolate->js_spill_information());
2124 }
2125
2126 return obj->Size();
2127}
2128
2129
2130static void ReportHistogram(Isolate* isolate, bool print_spill) {
2131 PrintF("\n Object Histogram:\n");
2132 for (int i = 0; i <= LAST_TYPE; i++) {
2133 if (isolate->heap_histograms()[i].number() > 0) {
2134 PrintF(" %-34s%10d (%10d bytes)\n",
2135 isolate->heap_histograms()[i].name(),
2136 isolate->heap_histograms()[i].number(),
2137 isolate->heap_histograms()[i].bytes());
2138 }
2139 }
2140 PrintF("\n");
2141
2142 // Summarize string types.
2143 int string_number = 0;
2144 int string_bytes = 0;
2145#define INCREMENT(type, size, name, camel_name) \
2146 string_number += isolate->heap_histograms()[type].number(); \
2147 string_bytes += isolate->heap_histograms()[type].bytes();
2148 STRING_TYPE_LIST(INCREMENT)
2149#undef INCREMENT
2150 if (string_number > 0) {
2151 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2152 string_bytes);
2153 }
2154
2155 if (FLAG_collect_heap_spill_statistics && print_spill) {
2156 isolate->js_spill_information()->Print();
2157 }
2158}
2159#endif // DEBUG
2160
2161
2162// Support for statistics gathering for --heap-stats and --log-gc.
2163void NewSpace::ClearHistograms() {
2164 for (int i = 0; i <= LAST_TYPE; i++) {
2165 allocated_histogram_[i].clear();
2166 promoted_histogram_[i].clear();
2167 }
2168}
2169
2170
2171// Because the copying collector does not touch garbage objects, we iterate
2172// the new space before a collection to get a histogram of allocated objects.
 2173// This only happens when the --log-gc flag is set.
2174void NewSpace::CollectStatistics() {
2175 ClearHistograms();
2176 SemiSpaceIterator it(this);
2177 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2178 RecordAllocation(obj);
2179}
2180
2181
2182static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2183 const char* description) {
2184 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2185 // Lump all the string types together.
2186 int string_number = 0;
2187 int string_bytes = 0;
2188#define INCREMENT(type, size, name, camel_name) \
2189 string_number += info[type].number(); \
2190 string_bytes += info[type].bytes();
2191 STRING_TYPE_LIST(INCREMENT)
2192#undef INCREMENT
2193 if (string_number > 0) {
2194 LOG(isolate,
2195 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2196 }
2197
2198 // Then do the other types.
2199 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2200 if (info[i].number() > 0) {
2201 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2202 info[i].bytes()));
2203 }
2204 }
2205 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2206}
2207
2208
2209void NewSpace::ReportStatistics() {
2210#ifdef DEBUG
2211 if (FLAG_heap_stats) {
2212 float pct = static_cast<float>(Available()) / TotalCapacity();
2213 PrintF(" capacity: %" V8_PTR_PREFIX
2214 "d"
2215 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2216 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2217 PrintF("\n Object Histogram:\n");
2218 for (int i = 0; i <= LAST_TYPE; i++) {
2219 if (allocated_histogram_[i].number() > 0) {
2220 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2221 allocated_histogram_[i].number(),
2222 allocated_histogram_[i].bytes());
2223 }
2224 }
2225 PrintF("\n");
2226 }
2227#endif // DEBUG
2228
2229 if (FLAG_log_gc) {
2230 Isolate* isolate = heap()->isolate();
2231 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2232 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2233 }
2234}
2235
2236
2237void NewSpace::RecordAllocation(HeapObject* obj) {
2238 InstanceType type = obj->map()->instance_type();
2239 DCHECK(0 <= type && type <= LAST_TYPE);
2240 allocated_histogram_[type].increment_number(1);
2241 allocated_histogram_[type].increment_bytes(obj->Size());
2242}
2243
2244
2245void NewSpace::RecordPromotion(HeapObject* obj) {
2246 InstanceType type = obj->map()->instance_type();
2247 DCHECK(0 <= type && type <= LAST_TYPE);
2248 promoted_histogram_[type].increment_number(1);
2249 promoted_histogram_[type].increment_bytes(obj->Size());
2250}
2251
2252
2253size_t NewSpace::CommittedPhysicalMemory() {
2254 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2255 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2256 size_t size = to_space_.CommittedPhysicalMemory();
2257 if (from_space_.is_committed()) {
2258 size += from_space_.CommittedPhysicalMemory();
2259 }
2260 return size;
2261}
2262
2263
2264// -----------------------------------------------------------------------------
2265// Free lists for old object spaces implementation
2266
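// Splices the other category's node list in front of this one and returns
// the number of bytes taken over; the donor category is reset to empty.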
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002267intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2268 intptr_t free_bytes = 0;
2269 if (category->top() != NULL) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002270 DCHECK(category->end_ != NULL);
2271 free_bytes = category->available();
2272 if (end_ == NULL) {
2273 end_ = category->end();
2274 } else {
2275 category->end()->set_next(top());
2276 }
2277 set_top(category->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002278 available_ += category->available();
2279 category->Reset();
2280 }
2281 return free_bytes;
2282}
2283
2284
2285void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002286 set_top(nullptr);
2287 set_end(nullptr);
2288 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002289}
2290
2291
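// Unlinks every FreeSpace node that lives on page p from this category and
// returns the total number of bytes removed. The page's per-list accounting
// is updated accordingly.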
2292intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002293 intptr_t sum = 0;
2294 FreeSpace* prev_node = nullptr;
2295 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2296 cur_node = cur_node->next()) {
2297 Page* page_for_node = Page::FromAddress(cur_node->address());
2298 if (page_for_node == p) {
2299 // FreeSpace node on eviction page found, unlink it.
2300 int size = cur_node->size();
2301 sum += size;
2302 DCHECK((prev_node != nullptr) || (top() == cur_node));
2303 if (cur_node == top()) {
2304 set_top(cur_node->next());
2305 }
2306 if (cur_node == end()) {
2307 set_end(prev_node);
2308 }
2309 if (prev_node != nullptr) {
2310 prev_node->set_next(cur_node->next());
2311 }
2312 continue;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002313 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002314 prev_node = cur_node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002315 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002316 DCHECK_EQ(p->available_in_free_list(type_), sum);
2317 p->add_available_in_free_list(type_, -sum);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002318 available_ -= sum;
2319 return sum;
2320}
2321
2322
2323bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002324 FreeSpace* node = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002325 while (node != NULL) {
2326 if (Page::FromAddress(node->address()) == p) return true;
2327 node = node->next();
2328 }
2329 return false;
2330}
2331
2332
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002333FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2334 FreeSpace* node = top();
2335 if (node == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002336
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002337 Page* page = Page::FromAddress(node->address());
2338 while ((node != nullptr) && !page->CanAllocate()) {
2339 available_ -= node->size();
2340 page->add_available_in_free_list(type_, -(node->Size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002341 node = node->next();
2342 }
2343
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002344 if (node != nullptr) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002345 set_top(node->next());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002346 *node_size = node->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002347 available_ -= *node_size;
2348 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002349 set_top(nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002350 }
2351
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002352 if (top() == nullptr) {
2353 set_end(nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002354 }
2355
2356 return node;
2357}
2358
2359
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002360FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2361 int* node_size) {
2362 FreeSpace* node = PickNodeFromList(node_size);
2363 if ((node != nullptr) && (*node_size < size_in_bytes)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002364 Free(node, *node_size);
2365 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002366 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002367 }
2368 return node;
2369}
2370
2371
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002372FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
2373 int* node_size) {
2374 FreeSpace* prev_non_evac_node = nullptr;
2375 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2376 cur_node = cur_node->next()) {
2377 int size = cur_node->size();
2378 Page* page_for_node = Page::FromAddress(cur_node->address());
2379
2380 if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
2381 // The node is either large enough or contained in an evacuation
2382 // candidate. In both cases we need to unlink it from the list.
2383 available_ -= size;
2384 if (cur_node == top()) {
2385 set_top(cur_node->next());
2386 }
2387 if (cur_node == end()) {
2388 set_end(prev_non_evac_node);
2389 }
2390 if (prev_non_evac_node != nullptr) {
2391 prev_non_evac_node->set_next(cur_node->next());
2392 }
2393 // For evacuation candidates we continue.
2394 if (!page_for_node->CanAllocate()) {
2395 page_for_node->add_available_in_free_list(type_, -size);
2396 continue;
2397 }
2398 // Otherwise we have a large enough node and can return.
2399 *node_size = size;
2400 return cur_node;
2401 }
2402
2403 prev_non_evac_node = cur_node;
2404 }
2405 return nullptr;
2406}
2407
2408
2409void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2410 free_space->set_next(top());
2411 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002412 if (end_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002413 end_ = free_space;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002414 }
2415 available_ += size_in_bytes;
2416}
2417
2418
2419void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002420 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002421 while (n != NULL) {
2422 Map** map_location = reinterpret_cast<Map**>(n->address());
2423 if (*map_location == NULL) {
2424 *map_location = heap->free_space_map();
2425 } else {
2426 DCHECK(*map_location == heap->free_space_map());
2427 }
2428 n = n->next();
2429 }
2430}
2431
2432
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002433FreeList::FreeList(PagedSpace* owner)
2434 : owner_(owner),
2435 wasted_bytes_(0),
2436 small_list_(this, kSmall),
2437 medium_list_(this, kMedium),
2438 large_list_(this, kLarge),
2439 huge_list_(this, kHuge) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002440 Reset();
2441}
2442
2443
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002444intptr_t FreeList::Concatenate(FreeList* other) {
2445 intptr_t usable_bytes = 0;
2446 intptr_t wasted_bytes = 0;
2447
2448 // This is safe (not going to deadlock) since Concatenate operations
2449 // are never performed on the same free lists at the same time in
2450 // reverse order. Furthermore, we only lock if the PagedSpace containing
 2451  // the free list is known to be globally available, i.e., not local.
2452 if (!owner()->is_local()) mutex_.Lock();
2453 if (!other->owner()->is_local()) other->mutex()->Lock();
2454
2455 wasted_bytes = other->wasted_bytes_;
2456 wasted_bytes_ += wasted_bytes;
2457 other->wasted_bytes_ = 0;
2458
2459 usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
2460 usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
2461 usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
2462 usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
2463
2464 if (!other->owner()->is_local()) other->mutex()->Unlock();
2465 if (!owner()->is_local()) mutex_.Unlock();
2466 return usable_bytes + wasted_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002467}
2468
2469
2470void FreeList::Reset() {
2471 small_list_.Reset();
2472 medium_list_.Reset();
2473 large_list_.Reset();
2474 huge_list_.Reset();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002475 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002476}
2477
2478
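// Returns the number of bytes that could not be put on a free list: blocks
// of kSmallListMin bytes or less are dropped on the floor and counted as
// wasted; everything else is added to the list matching its size class and 0
// is returned.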
2479int FreeList::Free(Address start, int size_in_bytes) {
2480 if (size_in_bytes == 0) return 0;
2481
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002482 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
2483
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002484 Page* page = Page::FromAddress(start);
2485
2486 // Early return to drop too-small blocks on the floor.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002487 if (size_in_bytes <= kSmallListMin) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002488 page->add_non_available_small_blocks(size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002489 wasted_bytes_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002490 return size_in_bytes;
2491 }
2492
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002493 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002494 // Insert other blocks at the head of a free list of the appropriate
2495 // magnitude.
2496 if (size_in_bytes <= kSmallListMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002497 small_list_.Free(free_space, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002498 page->add_available_in_small_free_list(size_in_bytes);
2499 } else if (size_in_bytes <= kMediumListMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002500 medium_list_.Free(free_space, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002501 page->add_available_in_medium_free_list(size_in_bytes);
2502 } else if (size_in_bytes <= kLargeListMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002503 large_list_.Free(free_space, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002504 page->add_available_in_large_free_list(size_in_bytes);
2505 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002506 huge_list_.Free(free_space, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002507 page->add_available_in_huge_free_list(size_in_bytes);
2508 }
2509
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002510 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002511 return 0;
2512}
2513
2514
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002515FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
2516 FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
2517 if (node != nullptr) {
2518 Page::FromAddress(node->address())
2519 ->add_available_in_free_list(category, -(*node_size));
2520 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2521 }
2522 return node;
2523}
2524
2525
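// Search order: quick picks from the small, medium and large lists first
// (any node on those lists is assumed to be large enough for the
// corresponding *AllocationMax), then a fitting-node search of the huge
// list, and finally a pick from the exact size class, which can still fail
// if the head node is too small. Returns nullptr when nothing fits.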
2526FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2527 FreeSpace* node = nullptr;
2528 Page* page = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002529
2530 if (size_in_bytes <= kSmallAllocationMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002531 node = FindNodeIn(kSmall, node_size);
2532 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002533 }
2534
2535 if (size_in_bytes <= kMediumAllocationMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002536 node = FindNodeIn(kMedium, node_size);
2537 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002538 }
2539
2540 if (size_in_bytes <= kLargeAllocationMax) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002541 node = FindNodeIn(kLarge, node_size);
2542 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002543 }
2544
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002545 node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
2546 if (node != nullptr) {
2547 page = Page::FromAddress(node->address());
 2548     page->add_available_in_huge_free_list(-(*node_size));
2549 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002550 return node;
2551 }
2552
2553 if (size_in_bytes <= kSmallListMax) {
2554 node = small_list_.PickNodeFromList(size_in_bytes, node_size);
2555 if (node != NULL) {
2556 DCHECK(size_in_bytes <= *node_size);
2557 page = Page::FromAddress(node->address());
2558 page->add_available_in_small_free_list(-(*node_size));
2559 }
2560 } else if (size_in_bytes <= kMediumListMax) {
2561 node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
2562 if (node != NULL) {
2563 DCHECK(size_in_bytes <= *node_size);
2564 page = Page::FromAddress(node->address());
2565 page->add_available_in_medium_free_list(-(*node_size));
2566 }
2567 } else if (size_in_bytes <= kLargeListMax) {
2568 node = large_list_.PickNodeFromList(size_in_bytes, node_size);
2569 if (node != NULL) {
2570 DCHECK(size_in_bytes <= *node_size);
2571 page = Page::FromAddress(node->address());
2572 page->add_available_in_large_free_list(-(*node_size));
2573 }
2574 }
2575
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002576 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2577 return node;
2578}
2579
2580
2581FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
2582 hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
2583 base::LockGuard<base::Mutex> guard(&mutex_);
2584 FreeSpace* node = nullptr;
2585 int node_size = 0;
2586 // Try to find a node that fits exactly.
2587 node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
2588 // If no node could be found get as much memory as possible.
2589 if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
2590 if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
2591 if (node != nullptr) {
2592 // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
 2593    // size larger than the minimum size required for FreeSpace, and (b) get
2594 // a block that can actually be freed into some FreeList later on.
2595 if (hint_size_in_bytes <= kSmallListMin) {
2596 hint_size_in_bytes = kSmallListMin + kPointerSize;
2597 }
 2598    // Give back leftovers that were not required by {size_in_bytes}.
2599 intptr_t left_over = node_size - hint_size_in_bytes;
2600
2601 // Do not bother to return anything below {kSmallListMin} as it would be
 2602    // immediately discarded anyway.
2603 if (left_over > kSmallListMin) {
2604 Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
2605 node->set_size(static_cast<int>(hint_size_in_bytes));
2606 }
2607 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002608 return node;
2609}
2610
2611
2612// Allocation on the old space free list. If it succeeds then a new linear
2613// allocation space has been set up with the top and limit of the space. If
2614// the allocation fails then NULL is returned, and the caller can perform a GC
2615// or allocate a new page before retrying.
2616HeapObject* FreeList::Allocate(int size_in_bytes) {
2617 DCHECK(0 < size_in_bytes);
2618 DCHECK(size_in_bytes <= kMaxBlockSize);
2619 DCHECK(IsAligned(size_in_bytes, kPointerSize));
 2620  // Don't allocate from the free list if there is linear space available.
2621 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2622
2623 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2624 // Mark the old linear allocation area with a free space map so it can be
2625 // skipped when scanning the heap. This also puts it back in the free list
2626 // if it is big enough.
2627 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002628 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002629
2630 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2631 old_linear_size);
2632
2633 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002634 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2635 if (new_node == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002636
2637 int bytes_left = new_node_size - size_in_bytes;
2638 DCHECK(bytes_left >= 0);
2639
2640#ifdef DEBUG
2641 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2642 reinterpret_cast<Object**>(new_node->address())[i] =
2643 Smi::FromInt(kCodeZapValue);
2644 }
2645#endif
2646
2647 // The old-space-step might have finished sweeping and restarted marking.
2648 // Verify that it did not turn the page of the new node into an evacuation
2649 // candidate.
2650 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2651
2652 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2653
2654 // Memory in the linear allocation area is counted as allocated. We may free
2655 // a little of this again immediately - see below.
2656 owner_->Allocate(new_node_size);
2657
2658 if (owner_->heap()->inline_allocation_disabled()) {
2659 // Keep the linear allocation area empty if requested to do so, just
 2660    // return the area to the free list instead.
2661 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2662 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2663 } else if (bytes_left > kThreshold &&
2664 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002665 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002666 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2667 // We don't want to give too large linear areas to the allocator while
2668 // incremental marking is going on, because we won't check again whether
2669 // we want to do another increment until the linear area is used up.
2670 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2671 new_node_size - size_in_bytes - linear_size);
2672 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2673 new_node->address() + size_in_bytes + linear_size);
2674 } else if (bytes_left > 0) {
2675 // Normally we give the rest of the node to the allocator as its new
2676 // linear allocation area.
2677 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2678 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002679 }
2680
2681 return new_node;
2682}
2683
2684
2685intptr_t FreeList::EvictFreeListItems(Page* p) {
2686 intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002687 if (sum < p->area_size()) {
2688 sum += small_list_.EvictFreeListItemsInList(p) +
2689 medium_list_.EvictFreeListItemsInList(p) +
2690 large_list_.EvictFreeListItemsInList(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002691 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002692 return sum;
2693}
2694
2695
2696bool FreeList::ContainsPageFreeListItems(Page* p) {
 2697  return huge_list_.ContainsPageFreeListItemsInList(p) ||
 2698         small_list_.ContainsPageFreeListItemsInList(p) ||
 2699         medium_list_.ContainsPageFreeListItemsInList(p) ||
 2700         large_list_.ContainsPageFreeListItemsInList(p);
2701}
2702
2703
2704void FreeList::RepairLists(Heap* heap) {
2705 small_list_.RepairFreeList(heap);
2706 medium_list_.RepairFreeList(heap);
2707 large_list_.RepairFreeList(heap);
2708 huge_list_.RepairFreeList(heap);
2709}
2710
2711
2712#ifdef DEBUG
2713intptr_t FreeListCategory::SumFreeList() {
2714 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002715 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002716 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002717 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2718 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002719 cur = cur->next();
2720 }
2721 return sum;
2722}
2723
2724
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002725int FreeListCategory::FreeListLength() {
2726 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002727 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002728 while (cur != NULL) {
2729 length++;
2730 cur = cur->next();
2731 if (length == kVeryLongFreeList) return length;
2732 }
2733 return length;
2734}
2735
2736
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002737bool FreeListCategory::IsVeryLong() {
2738 return FreeListLength() == kVeryLongFreeList;
2739}
2740
2741
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002742bool FreeList::IsVeryLong() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002743 return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
2744 large_list_.IsVeryLong() || huge_list_.IsVeryLong();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002745}
2746
2747
2748// This can take a very long time because it is linear in the number of entries
2749// on the free list, so it should not be called if FreeListLength returns
2750// kVeryLongFreeList.
2751intptr_t FreeList::SumFreeLists() {
2752 intptr_t sum = small_list_.SumFreeList();
2753 sum += medium_list_.SumFreeList();
2754 sum += large_list_.SumFreeList();
2755 sum += huge_list_.SumFreeList();
2756 return sum;
2757}
2758#endif
2759
2760
2761// -----------------------------------------------------------------------------
2762// OldSpace implementation
2763
2764void PagedSpace::PrepareForMarkCompact() {
2765 // We don't have a linear allocation area while sweeping. It will be restored
2766 // on the first allocation after the sweep.
2767 EmptyAllocationInfo();
2768
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002769 // Clear the free list before a full GC---it will be rebuilt afterward.
2770 free_list_.Reset();
2771}
2772
2773
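// Object bytes only: the unused tail of the current linear allocation area,
// limit() - top(), is subtracted from Size().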
2774intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002775 const intptr_t size = Size() - (limit() - top());
2776 CHECK_GE(limit(), top());
2777 CHECK_GE(size, 0);
2778 USE(size);
2779 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002780}
2781
2782
2783// After we have booted, we have created a map which represents free space
2784// on the heap. If there was already a free list then the elements on it
2785// were created with the wrong FreeSpaceMap (normally NULL), so we need to
2786// fix them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002787void PagedSpace::RepairFreeListsAfterDeserialization() {
2788 free_list_.RepairLists(heap());
2789 // Each page may have a small free space that is not tracked by a free list.
2790 // Update the maps for those free space objects.
2791 PageIterator iterator(this);
2792 while (iterator.has_next()) {
2793 Page* page = iterator.next();
2794 int size = static_cast<int>(page->non_available_small_blocks());
2795 if (size == 0) continue;
2796 Address address = page->OffsetToAddress(Page::kPageSize - size);
2797 heap()->CreateFillerObjectAt(address, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002798 }
2799}
2800
2801
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002802void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2803 if (allocation_info_.top() >= allocation_info_.limit()) return;
2804
2805 if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
 2806    // Create a filler object to keep the page iterable if it was iterable.
2807 int remaining =
2808 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2809 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2810 allocation_info_.Reset(nullptr, nullptr);
2811 }
2812}
2813
2814
2815HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002816 MarkCompactCollector* collector = heap()->mark_compact_collector();
2817 if (collector->sweeping_in_progress()) {
2818 // Wait for the sweeper threads here and complete the sweeping phase.
2819 collector->EnsureSweepingCompleted();
2820
2821 // After waiting for the sweeper threads, there may be new free-list
2822 // entries.
2823 return free_list_.Allocate(size_in_bytes);
2824 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002825 return nullptr;
2826}
2827
2828
2829HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2830 MarkCompactCollector* collector = heap()->mark_compact_collector();
2831 if (collector->sweeping_in_progress()) {
2832 collector->SweepAndRefill(this);
2833 return free_list_.Allocate(size_in_bytes);
2834 }
2835 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002836}
2837
2838
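// Slow path: refill the free list from concurrent sweeping results and
// retry, help sweeping on the main thread, and, unless the old generation
// allocation limit has been reached, try to expand the space. Returns
// nullptr if every fallback fails.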
2839HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2840 // Allocation in this space has failed.
2841
2842 MarkCompactCollector* collector = heap()->mark_compact_collector();
2843 // Sweeping is still in progress.
2844 if (collector->sweeping_in_progress()) {
 2845    // First try to refill the free-list; concurrent sweeper threads
2846 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002847 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002848
2849 // Retry the free list allocation.
2850 HeapObject* object = free_list_.Allocate(size_in_bytes);
2851 if (object != NULL) return object;
2852
2853 // If sweeping is still in progress try to sweep pages on the main thread.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002854 collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
2855 RefillFreeList();
2856 object = free_list_.Allocate(size_in_bytes);
2857 if (object != nullptr) return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002858 }
2859
2860 // Free list allocation failed and there is no next page. Fail if we have
2861 // hit the old generation size limit that should cause a garbage
2862 // collection.
2863 if (!heap()->always_allocate() &&
2864 heap()->OldGenerationAllocationLimitReached()) {
2865 // If sweeper threads are active, wait for them at that point and steal
 2866    // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002867 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2868 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002869 }
2870
2871 // Try to expand the space and allocate in the new next page.
2872 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002873 DCHECK((CountTotalPages() > 1) ||
2874 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002875 return free_list_.Allocate(size_in_bytes);
2876 }
2877
2878 // If sweeper threads are active, wait for them at that point and steal
 2879  // elements from their free-lists. Allocation may still fail here, which
2880 // would indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002881 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002882}
2883
2884
2885#ifdef DEBUG
2886void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2887 CommentStatistic* comments_statistics =
2888 isolate->paged_space_comments_statistics();
2889 ReportCodeKindStatistics(isolate->code_kind_statistics());
2890 PrintF(
2891 "Code comment statistics (\" [ comment-txt : size/ "
2892 "count (average)\"):\n");
2893 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2894 const CommentStatistic& cs = comments_statistics[i];
2895 if (cs.size > 0) {
2896 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2897 cs.size / cs.count);
2898 }
2899 }
2900 PrintF("\n");
2901}
2902
2903
2904void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2905 CommentStatistic* comments_statistics =
2906 isolate->paged_space_comments_statistics();
2907 ClearCodeKindStatistics(isolate->code_kind_statistics());
2908 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2909 comments_statistics[i].Clear();
2910 }
2911 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2912 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2913 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2914}
2915
2916
 2917// Adds a comment to the 'comment_statistics' table. Performance is OK as
 2918// long as 'kMaxComments' is small.
2919static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2920 CommentStatistic* comments_statistics =
2921 isolate->paged_space_comments_statistics();
2922 // Do not count empty comments
2923 if (delta <= 0) return;
2924 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2925 // Search for a free or matching entry in 'comments_statistics': 'cs'
2926 // points to result.
2927 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2928 if (comments_statistics[i].comment == NULL) {
2929 cs = &comments_statistics[i];
2930 cs->comment = comment;
2931 break;
2932 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2933 cs = &comments_statistics[i];
2934 break;
2935 }
2936 }
2937 // Update entry for 'comment'
2938 cs->size += delta;
2939 cs->count += 1;
2940}
2941
2942
 2943// Call this for each nested comment start (start marked with '[ xxx', end
 2944// marked with ']'). RelocIterator 'it' must point to a comment reloc info.
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, so this loop is
    // guaranteed to exit.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new nested comment.
      CollectCommentStatistics(isolate, it);
      // Skip the code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


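// Prints the capacity, wasted, and available byte counts for this space,
// followed by a histogram of the live objects it contains. Any concurrent
// sweeping is finished first so that the object iterator sees a consistent
// heap.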
void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF(" capacity: %" V8_PTR_PREFIX
         "d"
         ", waste: %" V8_PTR_PREFIX
         "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
    heap()->mark_compact_collector()->EnsureSweepingCompleted();
  }
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

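// A typical traversal (a sketch; the iterator is not GC-safe, so no
// allocation may happen while it is in use):
//
//   LargeObjectIterator it(heap->lo_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }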
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
}


HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace


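// Large objects each get a page of their own. The pages are kept in a singly
// linked list headed by 'first_page_', and 'chunk_map_' maps every
// MemoryChunk::kAlignment-aligned address covered by a large page back to
// that page (see AllocateRaw() and FindPage()).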
LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(HashMap::PointersMatch, 1024) {}


LargeObjectSpace::~LargeObjectSpace() {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


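// Allocates a dedicated large page for an object of 'object_size' bytes,
// links it into the page list, and registers it in the chunk map so that the
// object can later be found from any interior address.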
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  AccountCommitted(static_cast<intptr_t>(page->size()));
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
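  // For example, assuming a kAlignment of 1 MB, a 3 MB large page starting at
  // 0x40000000 registers keys for 0x40000000, 0x40100000, and 0x40200000, so
  // FindPage() can resolve any interior address with a single hash lookup.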
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  return object;
}


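// Returns the physical memory actually committed for the large pages; on
// platforms without lazy commits this is simply the committed virtual memory.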
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}


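// Returns the large page containing address 'a', or NULL if 'a' does not lie
// inside any large page. Uses the chunk map populated by AllocateRaw().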
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key));
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


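// After marking, every live large object is black. Reset the mark bits to
// white and clear the per-page live-byte and progress-bar state so the next
// GC cycle starts from a clean slate.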
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(Marking::IsBlack(mark_bit));
    Marking::BlackToWhite(mark_bit);
    Page::FromAddress(object->address())->ResetProgressBar();
    Page::FromAddress(object->address())->ResetLiveBytes();
    current = current->next_page();
  }
}


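// Frees every large page whose object was not marked live: the page is
// unlinked from the page list, its chunk-map entries are removed, the
// accounting counters are updated, and the memory chunk is queued for
// release.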
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(!Marking::IsGrey(mark_bit));
    if (Marking::IsBlack(mark_bit)) {
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      AccountUncommitted(static_cast<intptr_t>(page->size()));
      objects_size_ -= object->Size();
      page_count_--;

      // Remove the entries belonging to this page. Use a local 'alignment'
      // variable to keep the lines below within the 80-character limit
      // checked by tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      heap()->QueueMemoryChunkForFree(page);
    }
  }
}


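// An object belongs to this space exactly when the memory chunk that contains
// it is owned by this space; in slow-DCHECK builds the chunk map is consulted
// as a cross-check.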
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}


bool LargeObjectSpace::Contains(Address address) {
  return FindPage(address) != NULL;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings (sequential
    // strings that have been morphed into external strings), fixed arrays,
    // fixed double arrays, and byte arrays in the large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      " number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8