// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/slot-set.h"
#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
  // You can't actually iterate over the anchor page. It is not a real page,
  // just an anchor for the double linked page list. Initialize as if we have
  // reached the end of the anchor page, then the first iteration will move on
  // to the first page.
  Initialize(space, NULL, NULL, kAllPagesInSpace);
}


HeapObjectIterator::HeapObjectIterator(Page* page) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
             page->area_end(), kOnePageOnly);
  DCHECK(page->SweepingDone());
}


void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
                                    HeapObjectIterator::PageMode mode) {
  space_ = space;
  cur_addr_ = cur;
  cur_end_ = end;
  page_mode_ = mode;
}


// We have hit the end of the page and should advance to the next block of
// objects. This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK(cur_addr_ == cur_end_);
  if (page_mode_ == kOnePageOnly) return false;
  Page* cur_page;
  if (cur_addr_ == NULL) {
    cur_page = space_->anchor();
  } else {
    cur_page = Page::FromAddress(cur_addr_ - 1);
    DCHECK(cur_addr_ == cur_page->area_end());
  }
  cur_page = cur_page->next_page();
  if (cur_page == space_->anchor()) return false;
  cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
      cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}

PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->ResumeAllocationObservers();
  }
}

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}


bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
#ifdef V8_TARGET_ARCH_MIPS64
  // To use pseudo-relative jumps such as j/jal instructions which have 28-bit
  // encoded immediate, the addresses have to be in range of 256Mb aligned
  // region.
  code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
#else
  code_range_ = new base::VirtualMemory(requested);
#endif
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (kReservedCodeRangePages) {
    if (!code_range_->Commit(
            base, kReservedCodeRangePages * base::OS::CommitPageSize(), true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += kReservedCodeRangePages * base::OS::CommitPageSize();
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) -
                kReservedCodeRangePages * base::OS::CommitPageSize();
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}
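// Scans forward from the current allocation block for one that can hold at
// least |requested| bytes. If none is found, the blocks on the free list are
// merged back into the allocation list (sorted by address, with adjacent
// blocks coalesced) and the scan is retried from the beginning. Returns false
// if the code range is full or too fragmented to satisfy the request.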
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}


Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes the guard pages while commit_size does not. Make
  // sure callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}


void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}
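// Carves a block of at least |requested_size| bytes (rounded up to
// MemoryChunk::kAlignment) off the front of the current allocation block and
// returns it in |block|. If the remainder of the allocation block would be
// smaller than a page, the whole block is handed out instead.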
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate)
    : isolate_(isolate),
      capacity_(0),
      capacity_executable_(0),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}


bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
  capacity_ = RoundUp(capacity, Page::kPageSize);
  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
  DCHECK_GE(capacity_, capacity_executable_);

  size_ = 0;
  size_executable_ = 0;

  return true;
}


void MemoryAllocator::TearDown() {
  // Check that spaces were torn down before MemoryAllocator.
  DCHECK(size_.Value() == 0);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK(size_executable_ == 0);
  capacity_ = 0;
  capacity_executable_ = 0;
}


bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeNewSpaceMemory(Address addr,
                                         base::VirtualMemory* reservation,
                                         Executability executable) {
  LOG(isolate_, DeleteEvent("NewSpace", addr));

  DCHECK(reservation->IsReserved());
  const intptr_t size = static_cast<intptr_t>(reservation->size());
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  FreeMemory(reservation, NOT_EXECUTABLE);
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(isolate_->code_range() == NULL ||
         !isolate_->code_range()->contains(
             static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
         !isolate_->code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (isolate_->code_range() != NULL &&
      isolate_->code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    isolate_->code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
           !isolate_->code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}
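// Reserves (but does not commit) |size| bytes of virtual memory aligned to
// |alignment| and transfers ownership of the reservation to |controller|.
// Returns the aligned base address, or NULL if the reservation failed.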
Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}


Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}
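// An anchor page is not a real page; it is the sentinel of a space's circular
// page list, so it simply links to itself in both directions.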
void Page::InitializeAsAnchor(PagedSpace* owner) {
  set_owner(owner);
  set_prev_page(this);
  set_next_page(this);
}


NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
                                       SemiSpace* semi_space) {
  Address area_start = start + NewSpacePage::kObjectStartOffset;
  Address area_end = start + Page::kPageSize;

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
                              area_end, NOT_EXECUTABLE, semi_space, nullptr);
  bool in_to_space = (semi_space->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}


void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
  set_owner(semi_space);
  set_next_chunk(this);
  set_prev_chunk(this);
  // The flags mark this invalid page as not being in new-space.
  // All real new-space pages will be in new-space.
  SetFlags(0, ~0);
}
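// Fills in the header fields of a chunk whose memory has already been
// reserved at |base|. If a |reservation| is passed in, the chunk takes
// ownership of it; callers that carve chunks out of a larger reservation
// (e.g. NewSpacePage::Initialize above) pass nullptr instead.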
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->slots_buffer_ = nullptr;
  chunk->old_to_new_slots_ = nullptr;
  chunk->old_to_old_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->parallel_compaction_state().SetValue(kCompactingDone);
  chunk->mutex_ = nullptr;
  chunk->available_in_free_list_ = 0;
  chunk->wasted_memory_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }

  return chunk;
}


// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
                                                               executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->isolate()->code_range();
      DCHECK(code_range != NULL && code_range->valid() &&
             IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}


MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |  Committed but not used    |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
        reserve_area_size > CodePageAreaSize()) {
#else
    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
#endif
      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
                                                       &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
  if (owner != NULL) {
    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}


void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
  available_in_free_list_ = 0;
}


Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
                                    Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == NULL) return NULL;
  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}


LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
                                              Space* owner,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(object_size, object_size, executable, owner);
  if (chunk == NULL) return NULL;
  return LargePage::Initialize(isolate_->heap(), chunk);
}


void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  if (chunk->owner() != NULL) {
    ObjectSpace space =
        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
  }

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    FreeMemory(reservation, chunk->executable());
  } else {
    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
  }
}


void MemoryAllocator::Free(MemoryChunk* chunk) {
  PreFreeMemory(chunk);
  PerformFreeMemory(chunk);
}


bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}
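// Overwrites |size| bytes starting at |start| with kZapValue, one pointer at a
// time, so that use of stale memory is easy to spot.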
void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}


void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
                                                AllocationAction action,
                                                size_t size) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    MemoryAllocationCallbackRegistration registration =
        memory_allocation_callbacks_[i];
    if ((registration.space & space) == space &&
        (registration.action & action) == action)
      registration.callback(space, action, static_cast<int>(size));
  }
}


bool MemoryAllocator::MemoryAllocationCallbackRegistered(
    MemoryAllocationCallback callback) {
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) return true;
  }
  return false;
}


void MemoryAllocator::AddMemoryAllocationCallback(
    MemoryAllocationCallback callback, ObjectSpace space,
    AllocationAction action) {
  DCHECK(callback != NULL);
  MemoryAllocationCallbackRegistration registration(callback, space, action);
  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
  return memory_allocation_callbacks_.Add(registration);
}


void MemoryAllocator::RemoveMemoryAllocationCallback(
    MemoryAllocationCallback callback) {
  DCHECK(callback != NULL);
  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
    if (memory_allocation_callbacks_[i].callback == callback) {
      memory_allocation_callbacks_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", used: %" V8_PTR_PREFIX
         "d"
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}
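// Commits an executable page in four steps: the (non-executable) header, a
// guard page behind the header, the executable body, and a guard page at the
// end of the reservation. On failure the regions committed so far are
// uncommitted again and false is returned.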
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation
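// Releases the auxiliary data structures owned by the chunk (slots buffer,
// skip list, mutex and both remembered sets). The chunk's backing memory is
// freed separately by the MemoryAllocator.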
void MemoryChunk::ReleaseAllocatedMemory() {
  delete slots_buffer_;
  slots_buffer_ = nullptr;
  delete skip_list_;
  skip_list_ = nullptr;
  delete mutex_;
  mutex_ = nullptr;
  ReleaseOldToNewSlots();
  ReleaseOldToOldSlots();
}
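// Allocates one SlotSet per page covered by a chunk of |size| bytes and points
// each set at the start of its page.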
static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK(pages > 0);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

void MemoryChunk::AllocateOldToNewSlots() {
  DCHECK(nullptr == old_to_new_slots_);
  old_to_new_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToNewSlots() {
  delete[] old_to_new_slots_;
  old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateOldToOldSlots() {
  DCHECK(nullptr == old_to_old_slots_);
  old_to_old_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToOldSlots() {
  delete[] old_to_old_slots_;
  old_to_old_slots_ = nullptr;
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);
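// Notifies all registered AllocationObservers that |size| bytes are about to
// be allocated at |soon_object|, unless observers are currently paused.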
void Space::AllocationStep(Address soon_object, int size) {
  if (!allocation_observers_paused_) {
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
    }
  }
}

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable), free_list_(this) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);

  anchor_.InitializeAsAnchor(this);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  PageIterator iterator(this);
  while (iterator.has_next()) {
    heap()->isolate()->memory_allocator()->Free(iterator.next());
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}


void PagedSpace::AddMemory(Address start, intptr_t size) {
  accounting_stats_.ExpandSpace(static_cast<int>(size));
  Free(start, static_cast<int>(size));
}


void PagedSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (this == heap()->old_space()) {
    free_list = collector->free_list_old_space().get();
  } else if (this == heap()->code_space()) {
    free_list = collector->free_list_code_space().get();
  } else if (this == heap()->map_space()) {
    free_list = collector->free_list_map_space().get();
  } else {
    // Any PagedSpace might invoke RefillFreeList. We filter all but our old
    // generation spaces out.
    return;
  }
  DCHECK(free_list != nullptr);
  intptr_t added = free_list_.Concatenate(free_list);
  accounting_stats_.IncreaseCapacity(added);
}


void CompactionSpace::RefillFreeList() {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  FreeList* free_list = nullptr;
  if (identity() == OLD_SPACE) {
    free_list = collector->free_list_old_space().get();
  } else if (identity() == CODE_SPACE) {
    free_list = collector->free_list_code_space().get();
  } else {
    // Compaction spaces only represent old or code space.
    UNREACHABLE();
  }
  DCHECK(free_list != nullptr);
  intptr_t refilled = 0;
  while (refilled < kCompactionMemoryWanted) {
    FreeSpace* node =
        free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
    if (node == nullptr) return;
    refilled += node->size();
    AddMemory(node->address(), node->size());
  }
}

void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
  DCHECK(identity() == other->identity());
  // Destroy the linear allocation space of {other}. This is needed to
  //   (a) not waste the memory and
  //   (b) keep the rest of the chunk in an iterable state (filler is needed).
  other->EmptyAllocationInfo();

  // Move over the free list. Concatenate makes sure that the source free list
  // gets properly reset after moving over all nodes.
  intptr_t added = free_list_.Concatenate(other->free_list());

  // Moved memory is not recorded as allocated memory, but rather increases and
  // decreases capacity of the corresponding spaces.
  other->accounting_stats_.DecreaseCapacity(added);
  accounting_stats_.IncreaseCapacity(added);
}
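// Merges a compaction space back into this space: free memory, accounting
// statistics, committed-memory counters and pages are moved over, while
// area_size_ and anchor_ are left untouched (see the list below).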
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  // Unmerged fields:
  //   area_size_
  //   anchor_

  MoveOverFreeMemory(other);

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  PageIterator it(other);
  Page* p = nullptr;
  while (it.has_next()) {
    p = it.next();
    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
  }
}


size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  PageIterator it(this);
  while (it.has_next()) {
    size += it.next()->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  PageIterator iterator(this);
  while (iterator.has_next()) {
    if (iterator.next() == p) return true;
  }
  return false;
}


Object* PagedSpace::FindObject(Address addr) {
  // Note: this function can only be called on iterable spaces.
  DCHECK(!heap()->mark_compact_collector()->in_use());

  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.

  Page* p = Page::FromAddress(addr);
  HeapObjectIterator it(p);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    Address cur = obj->address();
    Address next = cur + obj->Size();
    if ((cur <= addr) && (addr < next)) return obj;
  }

  UNREACHABLE();
  return Smi::FromInt(0);
}


bool PagedSpace::CanExpand(size_t size) {
  DCHECK(heap()->mark_compact_collector()->is_compacting() ||
         Capacity() <= heap()->MaxOldGenerationSize());

  // Are we going to exceed capacity for this space? At this point we can be
  // way over the maximum size because of AlwaysAllocate scopes and large
  // objects.
  if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;

  return true;
}


bool PagedSpace::Expand() {
  intptr_t size = AreaSize();
  if (snapshotable() && !HasPages()) {
    size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
  }

  if (!CanExpand(size)) return false;

  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
                                                                executable());
  if (p == NULL) return false;

  AccountCommitted(static_cast<intptr_t>(p->size()));

  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();

  DCHECK(Capacity() <= heap()->MaxOldGenerationSize());

  p->InsertAfter(anchor_.prev_page());

  return true;
}


int PagedSpace::CountTotalPages() {
  PageIterator it(this);
  int count = 0;
  while (it.has_next()) {
    it.next();
    count++;
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}


void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
  DCHECK(page->LiveBytes() == 0);
  DCHECK(AreaSize() == page->area_size());

  if (evict_free_list_items) {
    intptr_t size = free_list_.EvictFreeListItems(page);
    accounting_stats_.AllocateBytes(size);
    DCHECK_EQ(AreaSize(), static_cast<int>(size));
  }

  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->QueueMemoryChunkForFree(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}


#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  PageIterator page_iterator(this);
  while (page_iterator.has_next()) {
    Page* page = page_iterator.next();
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationTop(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

// -----------------------------------------------------------------------------
// NewSpace implementation


bool NewSpace::SetUp(int reserved_semispace_capacity,
                     int maximum_semispace_capacity) {
  // Set up new space based on the preallocated memory block defined by
  // start and size. The provided space is divided into two semi-spaces.
  // To support fast containment testing in the new space, the size of
  // this chunk must be a power of two and it must be aligned to its size.
  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();

  size_t size = 2 * reserved_semispace_capacity;
  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
      size, size, &reservation_);
  if (base == NULL) return false;

  chunk_base_ = base;
  chunk_size_ = static_cast<uintptr_t>(size);
  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));

  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));

  // Allocate and set up the histogram arrays if necessary.
  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);

#define SET_NAME(name)                        \
  allocated_histogram_[name].set_name(#name); \
  promoted_histogram_[name].set_name(#name);
  INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME

  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
  DCHECK(static_cast<intptr_t>(chunk_size_) >=
         2 * heap()->ReservedSemiSpaceSize());
  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));

  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
                  maximum_semispace_capacity);
  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
                    initial_semispace_capacity, maximum_semispace_capacity);
  if (!to_space_.Commit()) {
    return false;
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.

  start_ = chunk_base_;

  ResetAllocationInfo();

  return true;
}


void NewSpace::TearDown() {
  if (allocated_histogram_) {
    DeleteArray(allocated_histogram_);
    allocated_histogram_ = NULL;
  }
  if (promoted_histogram_) {
    DeleteArray(promoted_histogram_);
    promoted_histogram_ = NULL;
  }

  start_ = NULL;
  allocation_info_.Reset(nullptr, nullptr);


  to_space_.TearDown();
  from_space_.TearDown();

  heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
      chunk_base_, &reservation_, NOT_EXECUTABLE);

  chunk_base_ = NULL;
  chunk_size_ = 0;
}


void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }


void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  int new_capacity =
      Min(MaximumCapacity(),
          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}


void NewSpace::Shrink() {
  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from
      // space, attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        CHECK(false);
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
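// A LocalAllocationBuffer hands out memory from a [top, limit) range. Closing
// it (explicitly, or implicitly when it is assigned over) fills whatever is
// left between top and limit with a filler object so the heap stays iterable.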
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001431void LocalAllocationBuffer::Close() {
1432 if (IsValid()) {
1433 heap_->CreateFillerObjectAt(
1434 allocation_info_.top(),
1435 static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
1436 }
1437}
1438
1439
1440LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1441 AllocationInfo allocation_info)
1442 : heap_(heap), allocation_info_(allocation_info) {
1443 if (IsValid()) {
1444 heap_->CreateFillerObjectAt(
1445 allocation_info_.top(),
1446 static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
1447 }
1448}
1449
1450
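// Copying or assigning a LocalAllocationBuffer transfers ownership of the
// allocation area: the left-hand buffer is closed first and the right-hand
// buffer is invalidated. A usage sketch (how the buffer is obtained is
// hypothetical):
//
//   LocalAllocationBuffer lab = ...;    // some valid buffer
//   LocalAllocationBuffer other = lab;  // 'lab' is reset, 'other' owns the area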
1451LocalAllocationBuffer::LocalAllocationBuffer(
1452 const LocalAllocationBuffer& other) {
1453 *this = other;
1454}
1455
1456
1457LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1458 const LocalAllocationBuffer& other) {
1459 Close();
1460 heap_ = other.heap_;
1461 allocation_info_ = other.allocation_info_;
1462
1463  // This is needed since we (a) cannot yet use move-semantics, (b) want to
1464  // make the class easy to use by passing it around as a value, and (c)
1465  // implicitly call {Close} upon copy.
1466 const_cast<LocalAllocationBuffer&>(other)
1467 .allocation_info_.Reset(nullptr, nullptr);
1468 return *this;
1469}
1470
1471
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001472void NewSpace::UpdateAllocationInfo() {
1473 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001474 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001475 UpdateInlineAllocationLimit(0);
1476 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1477}
1478
1479
1480void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001481 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001482 to_space_.Reset();
1483 UpdateAllocationInfo();
1484 pages_used_ = 0;
1485 // Clear all mark-bits in the to-space.
1486 NewSpacePageIterator it(&to_space_);
1487 while (it.has_next()) {
1488 Bitmap::Clear(it.next());
1489 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001490 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001491}
1492
1493
1494void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1495 if (heap()->inline_allocation_disabled()) {
1496 // Lowest limit when linear allocation was disabled.
1497 Address high = to_space_.page_high();
1498 Address new_top = allocation_info_.top() + size_in_bytes;
1499 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001500 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001501 // Normal limit is the end of the current page.
1502 allocation_info_.set_limit(to_space_.page_high());
1503 } else {
1504 // Lower limit during incremental marking.
1505 Address high = to_space_.page_high();
1506 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001507 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001508 allocation_info_.set_limit(Min(new_limit, high));
1509 }
1510 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1511}
1512
1513
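// Tries to switch allocation to a fresh to-space page. The remainder of the
// current page is filled with a filler object so the page stays iterable.
// Returns false when to-space has no further page to advance to.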
1514bool NewSpace::AddFreshPage() {
1515 Address top = allocation_info_.top();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001516 DCHECK(!NewSpacePage::IsAtStart(top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001517 if (!to_space_.AdvancePage()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001518 // No more pages left to advance.
1519 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001520 }
1521
1522 // Clear remainder of current page.
1523 Address limit = NewSpacePage::FromLimit(top)->area_end();
1524 if (heap()->gc_state() == Heap::SCAVENGE) {
1525 heap()->promotion_queue()->SetNewLimit(limit);
1526 }
1527
1528 int remaining_in_page = static_cast<int>(limit - top);
1529 heap()->CreateFillerObjectAt(top, remaining_in_page);
1530 pages_used_++;
1531 UpdateAllocationInfo();
1532
1533 return true;
1534}
1535
1536
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001537bool NewSpace::AddFreshPageSynchronized() {
1538 base::LockGuard<base::Mutex> guard(&mutex_);
1539 return AddFreshPage();
1540}
1541
1542
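// Makes sure the current to-space page has room for an allocation of
// size_in_bytes with the given alignment, advancing to a fresh page if
// necessary. Returns false if no fresh page could be added.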
1543bool NewSpace::EnsureAllocation(int size_in_bytes,
1544 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001545 Address old_top = allocation_info_.top();
1546 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001547 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1548 int aligned_size_in_bytes = size_in_bytes + filler_size;
1549
1550 if (old_top + aligned_size_in_bytes >= high) {
1551 // Not enough room in the page, try to allocate a new one.
1552 if (!AddFreshPage()) {
1553 return false;
1554 }
1555
1556 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1557
1558 old_top = allocation_info_.top();
1559 high = to_space_.page_high();
1560 filler_size = Heap::GetFillToAlign(old_top, alignment);
1561 aligned_size_in_bytes = size_in_bytes + filler_size;
1562 }
1563
1564 DCHECK(old_top + aligned_size_in_bytes < high);
1565
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001566 if (allocation_info_.limit() < high) {
1567 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001568 // or because incremental marking wants to get a chance to do a step,
1569    // or because the idle scavenge job wants to get a chance to post a task.
1570 // Set the new limit accordingly.
1571 Address new_top = old_top + aligned_size_in_bytes;
1572 Address soon_object = old_top + filler_size;
1573 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1574 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1575 }
1576 return true;
1577}
1578
1579
1580void NewSpace::StartNextInlineAllocationStep() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001581 if (!allocation_observers_paused_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001582 top_on_previous_step_ =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001583 allocation_observers_->length() ? allocation_info_.top() : 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001584 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001585 }
1586}
1587
1588
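// The next inline allocation step size is the minimum over all observers'
// remaining byte counts, so that no observer misses its trigger point.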
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001589intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1590 intptr_t next_step = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001591 for (int i = 0; i < allocation_observers_->length(); ++i) {
1592 AllocationObserver* o = (*allocation_observers_)[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001593 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1594 : o->bytes_to_next_step();
1595 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001596 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001597 return next_step;
1598}
1599
Ben Murdoch097c5b22016-05-18 11:27:45 +01001600void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1601 Space::AddAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001602 StartNextInlineAllocationStep();
1603}
1604
Ben Murdoch097c5b22016-05-18 11:27:45 +01001605void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1606 Space::RemoveAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001607 StartNextInlineAllocationStep();
1608}
1609
Ben Murdoch097c5b22016-05-18 11:27:45 +01001610void NewSpace::PauseAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001611 // Do a step to account for memory allocated so far.
1612 InlineAllocationStep(top(), top(), nullptr, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001613 Space::PauseAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001614 top_on_previous_step_ = 0;
1615 UpdateInlineAllocationLimit(0);
1616}
1617
Ben Murdoch097c5b22016-05-18 11:27:45 +01001618void NewSpace::ResumeAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001619 DCHECK(top_on_previous_step_ == 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001620 Space::ResumeAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001621 StartNextInlineAllocationStep();
1622}
1623
1624
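// Notifies every registered allocation observer about the bytes allocated
// since the previous step and records new_top as the new baseline. This is
// a no-op while observers are paused (top_on_previous_step_ == 0).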
1625void NewSpace::InlineAllocationStep(Address top, Address new_top,
1626 Address soon_object, size_t size) {
1627 if (top_on_previous_step_) {
1628 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001629 for (int i = 0; i < allocation_observers_->length(); ++i) {
1630 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1631 size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001632 }
1633 top_on_previous_step_ = new_top;
1634 }
1635}
1636
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001637#ifdef VERIFY_HEAP
1638// We do not use the SemiSpaceIterator because verification doesn't assume
1639// that it works (it depends on the invariants we are checking).
1640void NewSpace::Verify() {
1641 // The allocation pointer should be in the space or at the very end.
1642 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1643
1644 // There should be objects packed in from the low address up to the
1645 // allocation pointer.
1646 Address current = to_space_.first_page()->area_start();
1647 CHECK_EQ(current, to_space_.space_start());
1648
1649 while (current != top()) {
1650 if (!NewSpacePage::IsAtEnd(current)) {
1651 // The allocation pointer should not be in the middle of an object.
1652 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1653 current < top());
1654
1655 HeapObject* object = HeapObject::FromAddress(current);
1656
1657 // The first word should be a map, and we expect all map pointers to
1658 // be in map space.
1659 Map* map = object->map();
1660 CHECK(map->IsMap());
1661 CHECK(heap()->map_space()->Contains(map));
1662
1663 // The object should not be code or a map.
1664 CHECK(!object->IsMap());
1665 CHECK(!object->IsCode());
1666
1667 // The object itself should look OK.
1668 object->ObjectVerify();
1669
1670 // All the interior pointers should be contained in the heap.
1671 VerifyPointersVisitor visitor;
1672 int size = object->Size();
1673 object->IterateBody(map->instance_type(), size, &visitor);
1674
1675 current += size;
1676 } else {
1677 // At end of page, switch to next page.
1678 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1679 // Next page should be valid.
1680 CHECK(!page->is_anchor());
1681 current = page->area_start();
1682 }
1683 }
1684
1685 // Check semi-spaces.
1686 CHECK_EQ(from_space_.id(), kFromSpace);
1687 CHECK_EQ(to_space_.id(), kToSpace);
1688 from_space_.Verify();
1689 to_space_.Verify();
1690}
1691#endif
1692
1693// -----------------------------------------------------------------------------
1694// SemiSpace implementation
1695
Ben Murdoch097c5b22016-05-18 11:27:45 +01001696void SemiSpace::SetUp(Address start, int initial_capacity,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001697 int maximum_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001698 DCHECK_GE(maximum_capacity, Page::kPageSize);
1699 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1700 current_capacity_ = minimum_capacity_;
1701 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001702 committed_ = false;
1703 start_ = start;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001704 age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001705}
1706
1707
1708void SemiSpace::TearDown() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001709 start_ = nullptr;
1710 current_capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001711}
1712
1713
1714bool SemiSpace::Commit() {
1715 DCHECK(!is_committed());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001716 if (!heap()->isolate()->memory_allocator()->CommitBlock(
Ben Murdoch097c5b22016-05-18 11:27:45 +01001717 start_, current_capacity_, executable())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001718 return false;
1719 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001720 AccountCommitted(current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001721
1722 NewSpacePage* current = anchor();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001723 const int num_pages = current_capacity_ / Page::kPageSize;
1724 for (int i = 0; i < num_pages; i++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001725 NewSpacePage* new_page =
1726 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
1727 new_page->InsertAfter(current);
1728 current = new_page;
1729 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001730 Reset();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001731
1732 set_current_capacity(current_capacity_);
1733 committed_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001734 return true;
1735}
1736
1737
1738bool SemiSpace::Uncommit() {
1739 DCHECK(is_committed());
Ben Murdoch097c5b22016-05-18 11:27:45 +01001740 Address start = start_ + maximum_capacity_ - current_capacity_;
1741 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1742 start, current_capacity_)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001743 return false;
1744 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001745 AccountUncommitted(current_capacity_);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001746
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001747 anchor()->set_next_page(anchor());
1748 anchor()->set_prev_page(anchor());
1749
1750 committed_ = false;
1751 return true;
1752}
1753
1754
1755size_t SemiSpace::CommittedPhysicalMemory() {
1756 if (!is_committed()) return 0;
1757 size_t size = 0;
1758 NewSpacePageIterator it(this);
1759 while (it.has_next()) {
1760 size += it.next()->CommittedPhysicalMemory();
1761 }
1762 return size;
1763}
1764
1765
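// Grows the committed part of the semispace to new_capacity by committing
// the additional memory block and linking the newly created pages after the
// current last page. Page flags are copied from the last existing page.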
1766bool SemiSpace::GrowTo(int new_capacity) {
1767 if (!is_committed()) {
1768 if (!Commit()) return false;
1769 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001770 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
1771 DCHECK_LE(new_capacity, maximum_capacity_);
1772 DCHECK_GT(new_capacity, current_capacity_);
1773 int pages_before = current_capacity_ / Page::kPageSize;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001774 int pages_after = new_capacity / Page::kPageSize;
1775
Ben Murdoch097c5b22016-05-18 11:27:45 +01001776 size_t delta = new_capacity - current_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001777
1778 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1779 if (!heap()->isolate()->memory_allocator()->CommitBlock(
Ben Murdoch097c5b22016-05-18 11:27:45 +01001780 start_ + current_capacity_, delta, executable())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001781 return false;
1782 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001783 AccountCommitted(static_cast<intptr_t>(delta));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001784 set_current_capacity(new_capacity);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001785 NewSpacePage* last_page = anchor()->prev_page();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001786 DCHECK_NE(last_page, anchor());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001787 for (int i = pages_before; i < pages_after; i++) {
1788 Address page_address = start_ + i * Page::kPageSize;
1789 NewSpacePage* new_page =
1790 NewSpacePage::Initialize(heap(), page_address, this);
1791 new_page->InsertAfter(last_page);
1792 Bitmap::Clear(new_page);
1793    // Duplicate the flags that were set on the old page.
1794 new_page->SetFlags(last_page->GetFlags(),
1795 NewSpacePage::kCopyOnFlipFlagsMask);
1796 last_page = new_page;
1797 }
1798 return true;
1799}
1800
1801
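// Shrinks the committed part of the semispace to new_capacity by
// uncommitting the tail of the reservation and unlinking the pages that
// fall into the uncommitted range.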
1802bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001803 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
1804 DCHECK_GE(new_capacity, minimum_capacity_);
1805 DCHECK_LT(new_capacity, current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001806 if (is_committed()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001807 size_t delta = current_capacity_ - new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001808 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
1809
1810 MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
1811 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
1812 return false;
1813 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001814 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001815
1816 int pages_after = new_capacity / Page::kPageSize;
1817 NewSpacePage* new_last_page =
1818 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1819 new_last_page->set_next_page(anchor());
1820 anchor()->set_prev_page(new_last_page);
1821 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
1822 }
1823
Ben Murdoch097c5b22016-05-18 11:27:45 +01001824 set_current_capacity(new_capacity);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001825
1826 return true;
1827}
1828
Ben Murdoch097c5b22016-05-18 11:27:45 +01001829void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001830 anchor_.set_owner(this);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001831  // Fix up back-pointers to the anchor; its address changes when we swap.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001832 anchor_.prev_page()->set_next_page(&anchor_);
1833 anchor_.next_page()->set_prev_page(&anchor_);
1834
Ben Murdoch097c5b22016-05-18 11:27:45 +01001835 NewSpacePageIterator it(this);
1836 while (it.has_next()) {
1837 NewSpacePage* page = it.next();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001838 page->set_owner(this);
1839 page->SetFlags(flags, mask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001840 if (id_ == kToSpace) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001841 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1842 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1843 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1844 page->ResetLiveBytes();
1845 } else {
1846 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1847 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1848 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001849 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1850 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001851 }
1852}
1853
1854
1855void SemiSpace::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001856 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001857 current_page_ = anchor_.next_page();
1858}
1859
1860
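// Swaps the contents of the two semispaces. Everything except id_ is
// exchanged; afterwards the page flags of both spaces are rebuilt so that
// to-space and from-space pages carry the correct IN_*_SPACE flags.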
1861void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1862 // We won't be swapping semispaces without data in them.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001863 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1864 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001865
Ben Murdoch097c5b22016-05-18 11:27:45 +01001866 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001867
Ben Murdoch097c5b22016-05-18 11:27:45 +01001868 // We swap all properties but id_.
1869 std::swap(from->current_capacity_, to->current_capacity_);
1870 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1871 std::swap(from->minimum_capacity_, to->minimum_capacity_);
1872 std::swap(from->start_, to->start_);
1873 std::swap(from->age_mark_, to->age_mark_);
1874 std::swap(from->committed_, to->committed_);
1875 std::swap(from->anchor_, to->anchor_);
1876 std::swap(from->current_page_, to->current_page_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001877
Ben Murdoch097c5b22016-05-18 11:27:45 +01001878 to->FixPagesFlags(saved_to_space_flags, NewSpacePage::kCopyOnFlipFlagsMask);
1879 from->FixPagesFlags(0, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001880}
1881
1882
1883void SemiSpace::set_age_mark(Address mark) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001884 DCHECK_EQ(NewSpacePage::FromLimit(mark)->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001885 age_mark_ = mark;
1886 // Mark all pages up to the one containing mark.
1887 NewSpacePageIterator it(space_start(), mark);
1888 while (it.has_next()) {
1889 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1890 }
1891}
1892
1893
1894#ifdef DEBUG
1895void SemiSpace::Print() {}
1896#endif
1897
1898#ifdef VERIFY_HEAP
1899void SemiSpace::Verify() {
1900 bool is_from_space = (id_ == kFromSpace);
1901 NewSpacePage* page = anchor_.next_page();
1902 CHECK(anchor_.semi_space() == this);
1903 while (page != &anchor_) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001904 CHECK_EQ(page->semi_space(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001905 CHECK(page->InNewSpace());
1906 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1907 : MemoryChunk::IN_TO_SPACE));
1908 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1909 : MemoryChunk::IN_FROM_SPACE));
1910 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1911 if (!is_from_space) {
1912 // The pointers-from-here-are-interesting flag isn't updated dynamically
1913 // on from-space pages, so it might be out of sync with the marking state.
1914 if (page->heap()->incremental_marking()->IsMarking()) {
1915 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1916 } else {
1917 CHECK(
1918 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1919 }
1920 // TODO(gc): Check that the live_bytes_count_ field matches the
1921 // black marking on the page (if we make it match in new-space).
1922 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001923 CHECK_EQ(page->prev_page()->next_page(), page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001924 page = page->next_page();
1925 }
1926}
1927#endif
1928
1929#ifdef DEBUG
1930void SemiSpace::AssertValidRange(Address start, Address end) {
1931  // Addresses belong to the same semi-space.
1932 NewSpacePage* page = NewSpacePage::FromLimit(start);
1933 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1934 SemiSpace* space = page->semi_space();
1935 CHECK_EQ(space, end_page->semi_space());
1936 // Start address is before end address, either on same page,
1937 // or end address is on a later page in the linked list of
1938 // semi-space pages.
1939 if (page == end_page) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001940 CHECK_LE(start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001941 } else {
1942 while (page != end_page) {
1943 page = page->next_page();
1944 CHECK_NE(page, space->anchor());
1945 }
1946 }
1947}
1948#endif
1949
1950
1951// -----------------------------------------------------------------------------
1952// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001953
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001954SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001955 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001956}
1957
1958
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001959void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001960 SemiSpace::AssertValidRange(start, end);
1961 current_ = start;
1962 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001963}
1964
1965
1966#ifdef DEBUG
1967// heap_histograms is shared, always clear it before using it.
1968static void ClearHistograms(Isolate* isolate) {
1969// We reset the name each time, though it hasn't changed.
1970#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
1971 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
1972#undef DEF_TYPE_NAME
1973
1974#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
1975 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
1976#undef CLEAR_HISTOGRAM
1977
1978 isolate->js_spill_information()->Clear();
1979}
1980
1981
1982static void ClearCodeKindStatistics(int* code_kind_statistics) {
1983 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1984 code_kind_statistics[i] = 0;
1985 }
1986}
1987
1988
1989static void ReportCodeKindStatistics(int* code_kind_statistics) {
1990 PrintF("\n Code kind histograms: \n");
1991 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
1992 if (code_kind_statistics[i] > 0) {
1993 PrintF(" %-20s: %10d bytes\n",
1994 Code::Kind2String(static_cast<Code::Kind>(i)),
1995 code_kind_statistics[i]);
1996 }
1997 }
1998 PrintF("\n");
1999}
2000
2001
2002static int CollectHistogramInfo(HeapObject* obj) {
2003 Isolate* isolate = obj->GetIsolate();
2004 InstanceType type = obj->map()->instance_type();
2005 DCHECK(0 <= type && type <= LAST_TYPE);
2006 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2007 isolate->heap_histograms()[type].increment_number(1);
2008 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2009
2010 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2011 JSObject::cast(obj)
2012 ->IncrementSpillStatistics(isolate->js_spill_information());
2013 }
2014
2015 return obj->Size();
2016}
2017
2018
2019static void ReportHistogram(Isolate* isolate, bool print_spill) {
2020 PrintF("\n Object Histogram:\n");
2021 for (int i = 0; i <= LAST_TYPE; i++) {
2022 if (isolate->heap_histograms()[i].number() > 0) {
2023 PrintF(" %-34s%10d (%10d bytes)\n",
2024 isolate->heap_histograms()[i].name(),
2025 isolate->heap_histograms()[i].number(),
2026 isolate->heap_histograms()[i].bytes());
2027 }
2028 }
2029 PrintF("\n");
2030
2031 // Summarize string types.
2032 int string_number = 0;
2033 int string_bytes = 0;
2034#define INCREMENT(type, size, name, camel_name) \
2035 string_number += isolate->heap_histograms()[type].number(); \
2036 string_bytes += isolate->heap_histograms()[type].bytes();
2037 STRING_TYPE_LIST(INCREMENT)
2038#undef INCREMENT
2039 if (string_number > 0) {
2040 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2041 string_bytes);
2042 }
2043
2044 if (FLAG_collect_heap_spill_statistics && print_spill) {
2045 isolate->js_spill_information()->Print();
2046 }
2047}
2048#endif // DEBUG
2049
2050
2051// Support for statistics gathering for --heap-stats and --log-gc.
2052void NewSpace::ClearHistograms() {
2053 for (int i = 0; i <= LAST_TYPE; i++) {
2054 allocated_histogram_[i].clear();
2055 promoted_histogram_[i].clear();
2056 }
2057}
2058
2059
2060// Because the copying collector does not touch garbage objects, we iterate
2061// the new space before a collection to get a histogram of allocated objects.
2062// This only happens when the --log-gc flag is set.
2063void NewSpace::CollectStatistics() {
2064 ClearHistograms();
2065 SemiSpaceIterator it(this);
2066 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2067 RecordAllocation(obj);
2068}
2069
2070
2071static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2072 const char* description) {
2073 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2074 // Lump all the string types together.
2075 int string_number = 0;
2076 int string_bytes = 0;
2077#define INCREMENT(type, size, name, camel_name) \
2078 string_number += info[type].number(); \
2079 string_bytes += info[type].bytes();
2080 STRING_TYPE_LIST(INCREMENT)
2081#undef INCREMENT
2082 if (string_number > 0) {
2083 LOG(isolate,
2084 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2085 }
2086
2087 // Then do the other types.
2088 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2089 if (info[i].number() > 0) {
2090 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2091 info[i].bytes()));
2092 }
2093 }
2094 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2095}
2096
2097
2098void NewSpace::ReportStatistics() {
2099#ifdef DEBUG
2100 if (FLAG_heap_stats) {
2101 float pct = static_cast<float>(Available()) / TotalCapacity();
2102 PrintF(" capacity: %" V8_PTR_PREFIX
2103 "d"
2104 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2105 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2106 PrintF("\n Object Histogram:\n");
2107 for (int i = 0; i <= LAST_TYPE; i++) {
2108 if (allocated_histogram_[i].number() > 0) {
2109 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2110 allocated_histogram_[i].number(),
2111 allocated_histogram_[i].bytes());
2112 }
2113 }
2114 PrintF("\n");
2115 }
2116#endif // DEBUG
2117
2118 if (FLAG_log_gc) {
2119 Isolate* isolate = heap()->isolate();
2120 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2121 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2122 }
2123}
2124
2125
2126void NewSpace::RecordAllocation(HeapObject* obj) {
2127 InstanceType type = obj->map()->instance_type();
2128 DCHECK(0 <= type && type <= LAST_TYPE);
2129 allocated_histogram_[type].increment_number(1);
2130 allocated_histogram_[type].increment_bytes(obj->Size());
2131}
2132
2133
2134void NewSpace::RecordPromotion(HeapObject* obj) {
2135 InstanceType type = obj->map()->instance_type();
2136 DCHECK(0 <= type && type <= LAST_TYPE);
2137 promoted_histogram_[type].increment_number(1);
2138 promoted_histogram_[type].increment_bytes(obj->Size());
2139}
2140
2141
2142size_t NewSpace::CommittedPhysicalMemory() {
2143 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2144 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2145 size_t size = to_space_.CommittedPhysicalMemory();
2146 if (from_space_.is_committed()) {
2147 size += from_space_.CommittedPhysicalMemory();
2148 }
2149 return size;
2150}
2151
2152
2153// -----------------------------------------------------------------------------
2154// Free lists for old object spaces implementation
2155
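// Concatenate moves all nodes of |category| onto this category's list and
// returns the number of bytes that became available here.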
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002156intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
2157 intptr_t free_bytes = 0;
2158 if (category->top() != NULL) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002159 DCHECK(category->end_ != NULL);
2160 free_bytes = category->available();
2161 if (end_ == NULL) {
2162 end_ = category->end();
2163 } else {
2164 category->end()->set_next(top());
2165 }
2166 set_top(category->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002167 available_ += category->available();
2168 category->Reset();
2169 }
2170 return free_bytes;
2171}
2172
2173
2174void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002175 set_top(nullptr);
2176 set_end(nullptr);
2177 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002178}
2179
2180
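// Unlinks every FreeSpace node that lives on page |p| from this category and
// returns the total number of bytes removed.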
2181intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002182 intptr_t sum = 0;
2183 FreeSpace* prev_node = nullptr;
2184 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2185 cur_node = cur_node->next()) {
2186 Page* page_for_node = Page::FromAddress(cur_node->address());
2187 if (page_for_node == p) {
2188      // FreeSpace node on the eviction page found; unlink it.
2189 int size = cur_node->size();
2190 sum += size;
2191 DCHECK((prev_node != nullptr) || (top() == cur_node));
2192 if (cur_node == top()) {
2193 set_top(cur_node->next());
2194 }
2195 if (cur_node == end()) {
2196 set_end(prev_node);
2197 }
2198 if (prev_node != nullptr) {
2199 prev_node->set_next(cur_node->next());
2200 }
2201 continue;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002202 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002203 prev_node = cur_node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002204 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01002205 p->add_available_in_free_list(-sum);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002206 available_ -= sum;
2207 return sum;
2208}
2209
2210
2211bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002212 FreeSpace* node = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002213 while (node != NULL) {
2214 if (Page::FromAddress(node->address()) == p) return true;
2215 node = node->next();
2216 }
2217 return false;
2218}
2219
2220
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002221FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
2222 FreeSpace* node = top();
2223 if (node == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002224
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002225 Page* page = Page::FromAddress(node->address());
2226 while ((node != nullptr) && !page->CanAllocate()) {
2227 available_ -= node->size();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002228 page->add_available_in_free_list(-(node->Size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002229 node = node->next();
2230 }
2231
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002232 if (node != nullptr) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002233 set_top(node->next());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002234 *node_size = node->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002235 available_ -= *node_size;
2236 } else {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002237 set_top(nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002238 }
2239
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002240 if (top() == nullptr) {
2241 set_end(nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002242 }
2243
2244 return node;
2245}
2246
2247
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002248FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
2249 int* node_size) {
2250 FreeSpace* node = PickNodeFromList(node_size);
2251 if ((node != nullptr) && (*node_size < size_in_bytes)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002252 Free(node, *node_size);
2253 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002254 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002255 }
2256 return node;
2257}
2258
2259
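// Linear search for the first node of at least |size_in_bytes|. Nodes that
// live on pages which can no longer be allocated on (e.g. evacuation
// candidates) are unlinked along the way.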
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002260FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
2261 int* node_size) {
2262 FreeSpace* prev_non_evac_node = nullptr;
2263 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2264 cur_node = cur_node->next()) {
2265 int size = cur_node->size();
2266 Page* page_for_node = Page::FromAddress(cur_node->address());
2267
2268 if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
2269 // The node is either large enough or contained in an evacuation
2270 // candidate. In both cases we need to unlink it from the list.
2271 available_ -= size;
2272 if (cur_node == top()) {
2273 set_top(cur_node->next());
2274 }
2275 if (cur_node == end()) {
2276 set_end(prev_non_evac_node);
2277 }
2278 if (prev_non_evac_node != nullptr) {
2279 prev_non_evac_node->set_next(cur_node->next());
2280 }
2281 // For evacuation candidates we continue.
2282 if (!page_for_node->CanAllocate()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002283 page_for_node->add_available_in_free_list(-size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002284 continue;
2285 }
2286 // Otherwise we have a large enough node and can return.
2287 *node_size = size;
2288 return cur_node;
2289 }
2290
2291 prev_non_evac_node = cur_node;
2292 }
2293 return nullptr;
2294}
2295
2296
2297void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
2298 free_space->set_next(top());
2299 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002300 if (end_ == NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002301 end_ = free_space;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002302 }
2303 available_ += size_in_bytes;
2304}
2305
2306
2307void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002308 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002309 while (n != NULL) {
2310 Map** map_location = reinterpret_cast<Map**>(n->address());
2311 if (*map_location == NULL) {
2312 *map_location = heap->free_space_map();
2313 } else {
2314 DCHECK(*map_location == heap->free_space_map());
2315 }
2316 n = n->next();
2317 }
2318}
2319
Ben Murdoch097c5b22016-05-18 11:27:45 +01002320FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2321 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2322 category_[i].Initialize(this, static_cast<FreeListCategoryType>(i));
2323 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002324 Reset();
2325}
2326
2327
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002328intptr_t FreeList::Concatenate(FreeList* other) {
2329 intptr_t usable_bytes = 0;
2330 intptr_t wasted_bytes = 0;
2331
2332 // This is safe (not going to deadlock) since Concatenate operations
2333 // are never performed on the same free lists at the same time in
2334 // reverse order. Furthermore, we only lock if the PagedSpace containing
2335  // the free list is known to be globally available, i.e., not local.
2336 if (!owner()->is_local()) mutex_.Lock();
2337 if (!other->owner()->is_local()) other->mutex()->Lock();
2338
2339 wasted_bytes = other->wasted_bytes_;
2340 wasted_bytes_ += wasted_bytes;
2341 other->wasted_bytes_ = 0;
2342
Ben Murdoch097c5b22016-05-18 11:27:45 +01002343 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2344 usable_bytes += category_[i].Concatenate(
2345 other->GetFreeListCategory(static_cast<FreeListCategoryType>(i)));
2346 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002347
2348 if (!other->owner()->is_local()) other->mutex()->Unlock();
2349 if (!owner()->is_local()) mutex_.Unlock();
2350 return usable_bytes + wasted_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002351}
2352
2353
2354void FreeList::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002355 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2356 category_[i].Reset();
2357 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002358 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002359}
2360
2361
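// Turns the block at |start| into a filler object and, if it is large enough,
// adds it to the matching free list category. Returns the number of bytes
// dropped on the floor: |size_in_bytes| for blocks too small to track, and 0
// when the block was added to a category.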
2362int FreeList::Free(Address start, int size_in_bytes) {
2363 if (size_in_bytes == 0) return 0;
2364
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002365 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
2366
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002367 Page* page = Page::FromAddress(start);
2368
2369 // Early return to drop too-small blocks on the floor.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002370 if (size_in_bytes <= kSmallListMin) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002371 page->add_wasted_memory(size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002372 wasted_bytes_ += size_in_bytes;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002373 return size_in_bytes;
2374 }
2375
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002376 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002377 // Insert other blocks at the head of a free list of the appropriate
2378 // magnitude.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002379 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
2380 category_[type].Free(free_space, size_in_bytes);
2381 page->add_available_in_free_list(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002382
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002383 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002384 return 0;
2385}
2386
2387
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002388FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
2389 FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
2390 if (node != nullptr) {
2391 Page::FromAddress(node->address())
Ben Murdoch097c5b22016-05-18 11:27:45 +01002392 ->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002393 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2394 }
2395 return node;
2396}
2397
2398
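// Node lookup proceeds in three steps: (1) a constant-time fast path that
// picks the head of the first category whose minimum element size fits,
// (2) a linear search of the huge list, and (3) a best-fit linear search of
// the category selected for |size_in_bytes|.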
2399FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2400 FreeSpace* node = nullptr;
2401 Page* page = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002402
Ben Murdoch097c5b22016-05-18 11:27:45 +01002403 // First try the allocation fast path: try to allocate the minimum element
2404 // size of a free list category. This operation is constant time.
2405 FreeListCategoryType type =
2406 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2407 for (int i = type; i < kHuge; i++) {
2408 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002409 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002410 }
2411
Ben Murdoch097c5b22016-05-18 11:27:45 +01002412 // Next search the huge list for free list nodes. This takes linear time in
2413 // the number of huge elements.
2414 node = category_[kHuge].SearchForNodeInList(size_in_bytes, node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002415 if (node != nullptr) {
2416 page = Page::FromAddress(node->address());
Ben Murdoch097c5b22016-05-18 11:27:45 +01002417 page->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002418 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002419 return node;
2420 }
2421
Ben Murdoch097c5b22016-05-18 11:27:45 +01002422 // We need a huge block of memory, but we didn't find anything in the huge
2423 // list.
2424 if (type == kHuge) return nullptr;
2425
2426 // Now search the best fitting free list for a node that has at least the
2427 // requested size. This takes linear time in the number of elements.
2428 type = SelectFreeListCategoryType(size_in_bytes);
2429 node = category_[type].PickNodeFromList(size_in_bytes, node_size);
2430 if (node != nullptr) {
2431 DCHECK(size_in_bytes <= *node_size);
2432 page = Page::FromAddress(node->address());
2433 page->add_available_in_free_list(-(*node_size));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002434 }
2435
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002436 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2437 return node;
2438}
2439
2440
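// Removes a node of roughly |hint_size_in_bytes| from the free list under
// the lock, falling back to a node from the huge or large category when no
// fit is found. Surplus beyond the hint is returned to the free list.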
2441FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
2442 hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
2443 base::LockGuard<base::Mutex> guard(&mutex_);
2444 FreeSpace* node = nullptr;
2445 int node_size = 0;
2446 // Try to find a node that fits exactly.
2447 node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
2448  // If no node could be found, get as much memory as possible.
2449 if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
2450 if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
2451 if (node != nullptr) {
2452 // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
2453    // size larger than the minimum size required for FreeSpace, and (b) to get
2454 // a block that can actually be freed into some FreeList later on.
2455 if (hint_size_in_bytes <= kSmallListMin) {
2456 hint_size_in_bytes = kSmallListMin + kPointerSize;
2457 }
2458    // Give back leftovers that were not required by {hint_size_in_bytes}.
2459 intptr_t left_over = node_size - hint_size_in_bytes;
2460
2461 // Do not bother to return anything below {kSmallListMin} as it would be
2462    // immediately discarded anyway.
2463 if (left_over > kSmallListMin) {
2464 Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
2465 node->set_size(static_cast<int>(hint_size_in_bytes));
2466 }
2467 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002468 return node;
2469}
2470
2471
2472// Allocation on the old space free list. If it succeeds then a new linear
2473// allocation space has been set up with the top and limit of the space. If
2474// the allocation fails then NULL is returned, and the caller can perform a GC
2475// or allocate a new page before retrying.
2476HeapObject* FreeList::Allocate(int size_in_bytes) {
2477 DCHECK(0 < size_in_bytes);
2478 DCHECK(size_in_bytes <= kMaxBlockSize);
2479 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2480  // Don't free-list allocate if there is linear space available.
2481 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2482
2483 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2484 // Mark the old linear allocation area with a free space map so it can be
2485 // skipped when scanning the heap. This also puts it back in the free list
2486 // if it is big enough.
2487 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002488 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002489
2490 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2491 old_linear_size);
2492
2493 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002494 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2495 if (new_node == nullptr) return nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002496 owner_->AllocationStep(new_node->address(), size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002497
2498 int bytes_left = new_node_size - size_in_bytes;
2499 DCHECK(bytes_left >= 0);
2500
2501#ifdef DEBUG
2502 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2503 reinterpret_cast<Object**>(new_node->address())[i] =
2504 Smi::FromInt(kCodeZapValue);
2505 }
2506#endif
2507
2508 // The old-space-step might have finished sweeping and restarted marking.
2509 // Verify that it did not turn the page of the new node into an evacuation
2510 // candidate.
2511 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2512
2513 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2514
2515 // Memory in the linear allocation area is counted as allocated. We may free
2516 // a little of this again immediately - see below.
2517 owner_->Allocate(new_node_size);
2518
2519 if (owner_->heap()->inline_allocation_disabled()) {
2520 // Keep the linear allocation area empty if requested to do so, just
2521    // return the area to the free list instead.
2522 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
2523 DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
2524 } else if (bytes_left > kThreshold &&
2525 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002526 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002527 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2528 // We don't want to give too large linear areas to the allocator while
2529 // incremental marking is going on, because we won't check again whether
2530 // we want to do another increment until the linear area is used up.
2531 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2532 new_node_size - size_in_bytes - linear_size);
2533 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2534 new_node->address() + size_in_bytes + linear_size);
2535 } else if (bytes_left > 0) {
2536 // Normally we give the rest of the node to the allocator as its new
2537 // linear allocation area.
2538 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2539 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002540 }
2541
2542 return new_node;
2543}
2544
2545
2546intptr_t FreeList::EvictFreeListItems(Page* p) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002547 intptr_t sum = category_[kHuge].EvictFreeListItemsInList(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002548 if (sum < p->area_size()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002549 for (int i = kFirstCategory; i <= kLarge; i++) {
2550 sum += category_[i].EvictFreeListItemsInList(p);
2551 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002552 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002553 return sum;
2554}
2555
2556
2557bool FreeList::ContainsPageFreeListItems(Page* p) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002558 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2559    if (category_[i].ContainsPageFreeListItemsInList(p)) {
2560 return true;
2561 }
2562 }
2563 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002564}
2565
2566
2567void FreeList::RepairLists(Heap* heap) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002568 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2569 category_[i].RepairFreeList(heap);
2570 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002571}
2572
2573
2574#ifdef DEBUG
2575intptr_t FreeListCategory::SumFreeList() {
2576 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002577 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002578 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002579 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2580 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002581 cur = cur->next();
2582 }
2583 return sum;
2584}
2585
2586
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002587int FreeListCategory::FreeListLength() {
2588 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002589 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002590 while (cur != NULL) {
2591 length++;
2592 cur = cur->next();
2593 if (length == kVeryLongFreeList) return length;
2594 }
2595 return length;
2596}
2597
2598
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002599bool FreeListCategory::IsVeryLong() {
2600 return FreeListLength() == kVeryLongFreeList;
2601}
2602
2603
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002604bool FreeList::IsVeryLong() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002605 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2606 if (category_[i].IsVeryLong()) {
2607 return true;
2608 }
2609 }
2610 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002611}
2612
2613
2614// This can take a very long time because it is linear in the number of entries
2615// on the free list, so it should not be called if FreeListLength returns
2616// kVeryLongFreeList.
2617intptr_t FreeList::SumFreeLists() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002618 intptr_t sum = 0;
2619 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
2620 sum += category_[i].SumFreeList();
2621 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002622 return sum;
2623}
2624#endif
2625
2626
2627// -----------------------------------------------------------------------------
2628// OldSpace implementation
2629
2630void PagedSpace::PrepareForMarkCompact() {
2631 // We don't have a linear allocation area while sweeping. It will be restored
2632 // on the first allocation after the sweep.
2633 EmptyAllocationInfo();
2634
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002635 // Clear the free list before a full GC---it will be rebuilt afterward.
2636 free_list_.Reset();
2637}
2638
2639
2640intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002641 const intptr_t size = Size() - (limit() - top());
2642 CHECK_GE(limit(), top());
2643 CHECK_GE(size, 0);
2644 USE(size);
2645 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002646}
2647
2648
2649// After we have booted, we have created a map which represents free space
2650// on the heap. If there was already a free list then the elements on it
2651// were created with the wrong FreeSpaceMap (normally NULL), so we need to
2652// fix them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002653void PagedSpace::RepairFreeListsAfterDeserialization() {
2654 free_list_.RepairLists(heap());
2655 // Each page may have a small free space that is not tracked by a free list.
2656 // Update the maps for those free space objects.
2657 PageIterator iterator(this);
2658 while (iterator.has_next()) {
2659 Page* page = iterator.next();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002660 int size = static_cast<int>(page->wasted_memory());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002661 if (size == 0) continue;
2662 Address address = page->OffsetToAddress(Page::kPageSize - size);
2663 heap()->CreateFillerObjectAt(address, size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002664 }
2665}
2666
2667
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002668void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2669 if (allocation_info_.top() >= allocation_info_.limit()) return;
2670
2671 if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
2672 // Create filler object to keep page iterable if it was iterable.
2673 int remaining =
2674 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2675 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2676 allocation_info_.Reset(nullptr, nullptr);
2677 }
2678}
2679
2680
2681HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002682 MarkCompactCollector* collector = heap()->mark_compact_collector();
2683 if (collector->sweeping_in_progress()) {
2684 // Wait for the sweeper threads here and complete the sweeping phase.
2685 collector->EnsureSweepingCompleted();
2686
2687 // After waiting for the sweeper threads, there may be new free-list
2688 // entries.
2689 return free_list_.Allocate(size_in_bytes);
2690 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002691 return nullptr;
2692}
2693
2694
2695HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2696 MarkCompactCollector* collector = heap()->mark_compact_collector();
2697 if (collector->sweeping_in_progress()) {
2698 collector->SweepAndRefill(this);
2699 return free_list_.Allocate(size_in_bytes);
2700 }
2701 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002702}
2703
2704
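// Fallback after linear and free-list allocation have failed: refill the
// free list from the concurrent sweeper and retry, sweep a bounded number of
// pages on the main thread, then either wait for sweeping to finish (when
// the old-generation limit has been reached) or expand the space, retrying
// the free-list allocation at each stage.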
2705HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002706 const int kMaxPagesToSweep = 1;
2707
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002708 // Allocation in this space has failed.
2709
2710 MarkCompactCollector* collector = heap()->mark_compact_collector();
2711 // Sweeping is still in progress.
2712 if (collector->sweeping_in_progress()) {
2713    // First try to refill the free-list; concurrent sweeper threads
2714 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002715 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002716
2717 // Retry the free list allocation.
2718 HeapObject* object = free_list_.Allocate(size_in_bytes);
2719 if (object != NULL) return object;
2720
2721    // If sweeping is still in progress, try to sweep pages on the main thread.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002722 int max_freed = collector->SweepInParallel(heap()->paged_space(identity()),
2723 size_in_bytes, kMaxPagesToSweep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002724 RefillFreeList();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002725 if (max_freed >= size_in_bytes) {
2726 object = free_list_.Allocate(size_in_bytes);
2727 if (object != nullptr) return object;
2728 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002729 }
2730
2731 // Free list allocation failed and there is no next page. Fail if we have
2732 // hit the old generation size limit that should cause a garbage
2733 // collection.
2734 if (!heap()->always_allocate() &&
2735 heap()->OldGenerationAllocationLimitReached()) {
2736 // If sweeper threads are active, wait for them at that point and steal
2737    // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002738 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2739 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002740 }
2741
2742 // Try to expand the space and allocate in the new next page.
2743 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002744 DCHECK((CountTotalPages() > 1) ||
2745 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002746 return free_list_.Allocate(size_in_bytes);
2747 }
2748
2749 // If sweeper threads are active, wait for them at that point and steal
2750  // elements from their free-lists. Allocation may still fail, which
2751  // would indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002752 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002753}
2754
2755
2756#ifdef DEBUG
2757void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2758 CommentStatistic* comments_statistics =
2759 isolate->paged_space_comments_statistics();
2760 ReportCodeKindStatistics(isolate->code_kind_statistics());
2761 PrintF(
2762 "Code comment statistics (\" [ comment-txt : size/ "
2763 "count (average)\"):\n");
2764 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2765 const CommentStatistic& cs = comments_statistics[i];
2766 if (cs.size > 0) {
2767 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2768 cs.size / cs.count);
2769 }
2770 }
2771 PrintF("\n");
2772}
2773
2774
2775void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2776 CommentStatistic* comments_statistics =
2777 isolate->paged_space_comments_statistics();
2778 ClearCodeKindStatistics(isolate->code_kind_statistics());
2779 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2780 comments_statistics[i].Clear();
2781 }
2782 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2783 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2784 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2785}
2786
2787
2788// Adds a comment to the 'comment_statistics' table. Performance is OK as
2789// long as 'kMaxComments' is small.
2790static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2791 CommentStatistic* comments_statistics =
2792 isolate->paged_space_comments_statistics();
2793 // Do not count empty comments
2794 if (delta <= 0) return;
2795 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2796 // Search for a free or matching entry in 'comments_statistics': 'cs'
2797 // points to result.
2798 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2799 if (comments_statistics[i].comment == NULL) {
2800 cs = &comments_statistics[i];
2801 cs->comment = comment;
2802 break;
2803 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2804 cs = &comments_statistics[i];
2805 break;
2806 }
2807 }
2808 // Update entry for 'comment'
2809 cs->size += delta;
2810 cs->count += 1;
2811}


// Call for each nested comment start (start marked with '[ xxx', end marked
// with ']'). RelocIterator 'it' must point to a comment reloc info.
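// Code covered by an inner nested comment is attributed to that inner
// comment; only the remaining bytes are credited to the enclosing one.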
static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
  DCHECK(!it->done());
  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
  if (tmp[0] != '[') {
    // Not a nested comment; skip.
    return;
  }

  // Search for the end of the nested comment or a new nested comment.
  const char* const comment_txt =
      reinterpret_cast<const char*>(it->rinfo()->data());
  const byte* prev_pc = it->rinfo()->pc();
  int flat_delta = 0;
  it->next();
  while (true) {
    // All nested comments must be terminated properly, so we will eventually
    // exit this loop.
    DCHECK(!it->done());
    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
      const char* const txt =
          reinterpret_cast<const char*>(it->rinfo()->data());
      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
      if (txt[0] == ']') break;  // End of nested comment.
      // A new comment.
      CollectCommentStatistics(isolate, it);
      // Skip the code that was covered by the previous comment.
      prev_pc = it->rinfo()->pc();
    }
    it->next();
  }
  EnterComment(isolate, comment_txt, flat_delta);
}


// Collects code size statistics:
// - by code kind
// - by code comment
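// Instruction bytes not covered by any comment are attributed to the
// pseudo-comment "NoComment".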
void PagedSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
      RelocIterator it(code);
      int delta = 0;
      const byte* prev_pc = code->instruction_start();
      while (!it.done()) {
        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
          CollectCommentStatistics(isolate, &it);
          prev_pc = it.rinfo()->pc();
        }
        it.next();
      }

      DCHECK(code->instruction_start() <= prev_pc &&
             prev_pc <= code->instruction_end());
      delta += static_cast<int>(code->instruction_end() - prev_pc);
      EnterComment(isolate, "NoComment", delta);
    }
  }
}


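// Prints capacity, waste and availability for this space, then a histogram of
// the objects it contains. Sweeping is finished first so that the object
// iteration only walks fully swept pages.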
void PagedSpace::ReportStatistics() {
  int pct = static_cast<int>(Available() * 100 / Capacity());
  PrintF("  capacity: %" V8_PTR_PREFIX
         "d"
         ", waste: %" V8_PTR_PREFIX
         "d"
         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
         Capacity(), Waste(), Available(), pct);

  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
    heap()->mark_compact_collector()->EnsureSweepingCompleted();
  }
  ClearHistograms(heap()->isolate());
  HeapObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
    CollectHistogramInfo(obj);
  ReportHistogram(heap()->isolate(), true);
}
#endif


// -----------------------------------------------------------------------------
// MapSpace implementation

#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
#endif


// -----------------------------------------------------------------------------
// LargeObjectIterator

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page_;
}


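// Returns the single object on the current large page and advances to the
// next page; returns NULL once every page has been visited.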
HeapObject* LargeObjectIterator::Next() {
  if (current_ == NULL) return NULL;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}


// -----------------------------------------------------------------------------
// LargeObjectSpace


LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
      first_page_(NULL),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(HashMap::PointersMatch, 1024) {}


LargeObjectSpace::~LargeObjectSpace() {}


bool LargeObjectSpace::SetUp() {
  first_page_ = NULL;
  size_ = 0;
  page_count_ = 0;
  objects_size_ = 0;
  chunk_map_.Clear();
  return true;
}


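// Releases every large page, notifying memory allocation callbacks as it
// goes, and then resets the space to its empty state via SetUp().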
void LargeObjectSpace::TearDown() {
  while (first_page_ != NULL) {
    LargePage* page = first_page_;
    first_page_ = first_page_->next_page();
    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));

    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
        space, kAllocationActionFree, page->size());
    heap()->isolate()->memory_allocator()->Free(page);
  }
  SetUp();
}


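// Allocates a large object on its own LargePage. The page is linked into the
// space's page list and registered in chunk_map_ so that interior addresses
// can be mapped back to the page by FindPage() and FindObject().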
AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size)) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == NULL) return AllocationResult::Retry(identity());
  DCHECK(page->area_size() >= object_size);

  size_ += static_cast<int>(page->size());
  AccountCommitted(static_cast<intptr_t>(page->size()));
  objects_size_ += object_size;
  page_count_++;
  page->set_next_page(first_page_);
  first_page_ = page;

  // Register all MemoryChunk::kAlignment-aligned chunks covered by
  // this large page in the chunk map.
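  // E.g. a page of size 3 * kAlignment starting at an aligned address A maps
  // the three keys A / kAlignment, A / kAlignment + 1 and A / kAlignment + 2
  // to this page.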
  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
  for (uintptr_t key = base; key <= limit; key++) {
    HashMap::Entry* entry = chunk_map_.LookupOrInsert(
        reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
    DCHECK(entry != NULL);
    entry->value = page;
  }

  HeapObject* object = page->GetObject();

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);

  if (Heap::ShouldZapGarbage()) {
    // Make the object consistent so the heap can be verified in OldSpaceStep.
    // We only need to do this in debug builds or if verify_heap is on.
    reinterpret_cast<Object**>(object->address())[0] =
        heap()->fixed_array_map();
    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
  }

  heap()->incremental_marking()->OldSpaceStep(object_size);
  AllocationStep(object->address(), object_size);
  return object;
}


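// Returns the committed physical memory of all large pages, falling back to
// the committed virtual size on platforms without lazy commits.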
size_t LargeObjectSpace::CommittedPhysicalMemory() {
  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
  size_t size = 0;
  LargePage* current = first_page_;
  while (current != NULL) {
    size += current->CommittedPhysicalMemory();
    current = current->next_page();
  }
  return size;
}


// GC support
Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != NULL) {
    return page->GetObject();
  }
  return Smi::FromInt(0);  // Signaling not found.
}


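// Maps an arbitrary address to the LargePage containing it, if any, by
// probing chunk_map_ with the address divided by MemoryChunk::kAlignment.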
LargePage* LargeObjectSpace::FindPage(Address a) {
  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
                                        static_cast<uint32_t>(key));
  if (e != NULL) {
    DCHECK(e->value != NULL);
    LargePage* page = reinterpret_cast<LargePage*>(e->value);
    DCHECK(page->is_valid());
    if (page->Contains(a)) {
      return page;
    }
  }
  return NULL;
}


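// Called after marking, when every remaining large object is black: clears
// the mark bits and resets the progress bar and live-byte counts of each page
// in preparation for the next GC cycle.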
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(Marking::IsBlack(mark_bit));
    Marking::BlackToWhite(mark_bit);
    Page::FromAddress(object->address())->ResetProgressBar();
    Page::FromAddress(object->address())->ResetLiveBytes();
    current = current->next_page();
  }
}


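// Unlinks and releases every large page whose object was not marked live,
// removing its chunk_map_ entries and queuing the chunk for deferred freeing.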
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* previous = NULL;
  LargePage* current = first_page_;
  while (current != NULL) {
    HeapObject* object = current->GetObject();
    MarkBit mark_bit = Marking::MarkBitFrom(object);
    DCHECK(!Marking::IsGrey(mark_bit));
    if (Marking::IsBlack(mark_bit)) {
      previous = current;
      current = current->next_page();
    } else {
      LargePage* page = current;
      // Cut the chunk out from the chunk list.
      current = current->next_page();
      if (previous == NULL) {
        first_page_ = current;
      } else {
        previous->set_next_page(current);
      }

      // Free the chunk.
      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
                                                             heap()->isolate());
      size_ -= static_cast<int>(page->size());
      AccountUncommitted(static_cast<intptr_t>(page->size()));
      objects_size_ -= object->Size();
      page_count_--;

      // Remove entries belonging to this page.
      // Use a local 'alignment' variable to keep the lines below within the
      // 80-character line-length limit checked by tools/presubmit.py.
      const intptr_t alignment = MemoryChunk::kAlignment;
      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
      uintptr_t limit = base + (page->size() - 1) / alignment;
      for (uintptr_t key = base; key <= limit; key++) {
        chunk_map_.Remove(reinterpret_cast<void*>(key),
                          static_cast<uint32_t>(key));
      }

      heap()->QueueMemoryChunkForFree(page);
    }
  }
}


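// An object belongs to this space iff the MemoryChunk containing its address
// is owned by this space.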
bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}


#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
void LargeObjectSpace::Verify() {
  for (LargePage* chunk = first_page_; chunk != NULL;
       chunk = chunk->next_page()) {
    // Each chunk contains an object that starts at the large object page's
    // object area start.
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    // The first word should be a map, and we expect all map pointers to be
    // in map space.
    Map* map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map));

    // We have only code, sequential strings, external strings (sequential
    // strings that have been morphed into external strings), fixed arrays,
    // fixed double arrays, and byte arrays in the large object space.
    CHECK(object->IsCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsFixedArray() ||
          object->IsFixedDoubleArray() || object->IsByteArray());

    // The object itself should look OK.
    object->ObjectVerify();

    // Byte arrays and strings don't have interior pointers.
    if (object->IsCode()) {
      VerifyPointersVisitor code_visitor;
      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray* array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    }
  }
}
#endif


#ifdef DEBUG
void LargeObjectSpace::Print() {
  OFStream os(stdout);
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    obj->Print(os);
  }
}


void LargeObjectSpace::ReportStatistics() {
  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
  int num_objects = 0;
  ClearHistograms(heap()->isolate());
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    num_objects++;
    CollectHistogramInfo(obj);
  }

  PrintF(
      "  number of objects %d, "
      "size of objects %" V8_PTR_PREFIX "d\n",
      num_objects, objects_size_);
  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
}


void LargeObjectSpace::CollectCodeStatistics() {
  Isolate* isolate = heap()->isolate();
  LargeObjectIterator obj_it(this);
  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
    if (obj->IsCode()) {
      Code* code = Code::cast(obj);
      isolate->code_kind_statistics()[code->kind()] += code->Size();
    }
  }
}


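// Best-effort dump of the page contents: every object is printed, live
// (marked) objects are prefixed with '!', and the accumulated marked size is
// reported next to the page's live-byte counter.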
void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", this->address(),
         AllocationSpaceName(this->owner()->identity()));
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != NULL;
       object = objects.Next()) {
    bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
}

#endif  // DEBUG
}  // namespace internal
}  // namespace v8