// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/spaces.h"

#include <utility>

#include "src/base/bits.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/semaphore.h"
#include "src/full-codegen/full-codegen.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/slot-set.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// HeapObjectIterator

HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
    : cur_addr_(nullptr),
      cur_end_(nullptr),
      space_(space),
      page_range_(space->anchor()->next_page(), space->anchor()),
      current_page_(page_range_.begin()) {}

HeapObjectIterator::HeapObjectIterator(Page* page)
    : cur_addr_(nullptr),
      cur_end_(nullptr),
      space_(reinterpret_cast<PagedSpace*>(page->owner())),
      page_range_(page),
      current_page_(page_range_.begin()) {
#ifdef DEBUG
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space());
#endif  // DEBUG
}

// We have hit the end of the page and should advance to the next block of
// objects.  This happens at the end of the page.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);
  space_->heap()
      ->mark_compact_collector()
      ->sweeper()
      .SweepOrWaitUntilSweepingCompleted(cur_page);
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}

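// Pauses the allocation observers of all spaces for the lifetime of this
// scope; the matching destructor resumes them.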
PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  AllSpaces spaces(heap_);
  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
    space->ResumeAllocationObservers();
  }
}

// -----------------------------------------------------------------------------
// CodeRange


CodeRange::CodeRange(Isolate* isolate)
    : isolate_(isolate),
      code_range_(NULL),
      free_list_(0),
      allocation_list_(0),
      current_allocation_block_index_(0) {}

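// Reserves a contiguous block of virtual address space for code objects so
// that they can reach each other with near calls. On targets that need it
// (e.g. Win64), the first kReservedCodeRangePages pages are committed up
// front.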
bool CodeRange::SetUp(size_t requested) {
  DCHECK(code_range_ == NULL);

  if (requested == 0) {
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    if (kRequiresCodeRange) {
      requested = kMaximalCodeRangeSize;
    } else {
      return true;
    }
  }

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t reserved_area =
      kReservedCodeRangePages * base::OS::CommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area))
    requested += reserved_area;

  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

  code_range_ = new base::VirtualMemory(
      requested, Max(kCodeRangeAreaAlignment,
                     static_cast<size_t>(base::OS::AllocateAlignment())));
  CHECK(code_range_ != NULL);
  if (!code_range_->IsReserved()) {
    delete code_range_;
    code_range_ = NULL;
    return false;
  }

  // We are sure that we have mapped a block of requested addresses.
  DCHECK(code_range_->size() == requested);
  Address base = reinterpret_cast<Address>(code_range_->address());

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (reserved_area > 0) {
    if (!code_range_->Commit(base, reserved_area, true)) {
      delete code_range_;
      code_range_ = NULL;
      return false;
    }
    base += reserved_area;
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size = code_range_->size() - (aligned_base - base) - reserved_area;
  allocation_list_.Add(FreeBlock(aligned_base, size));
  current_allocation_block_index_ = 0;

  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
  return true;
}


int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
                                       const FreeBlock* right) {
  // The entire point of CodeRange is that the difference between two
  // addresses in the range can be represented as a signed 32-bit int,
  // so the cast is semantically correct.
  return static_cast<int>(left->start - right->start);
}

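// Advances current_allocation_block_index_ to a block that can hold
// |requested| bytes. If no block on the allocation list is large enough, the
// free list and allocation list are merged and coalesced before retrying.
// Returns false if the code range is full or too fragmented.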
bool CodeRange::GetNextAllocationBlock(size_t requested) {
  for (current_allocation_block_index_++;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }

  // Sort and merge the free blocks on the free list and the allocation list.
  free_list_.AddAll(allocation_list_);
  allocation_list_.Clear();
  free_list_.Sort(&CompareFreeBlockAddress);
  for (int i = 0; i < free_list_.length();) {
    FreeBlock merged = free_list_[i];
    i++;
    // Add adjacent free blocks to the current merged block.
    while (i < free_list_.length() &&
           free_list_[i].start == merged.start + merged.size) {
      merged.size += free_list_[i].size;
      i++;
    }
    if (merged.size > 0) {
      allocation_list_.Add(merged);
    }
  }
  free_list_.Clear();

  for (current_allocation_block_index_ = 0;
       current_allocation_block_index_ < allocation_list_.length();
       current_allocation_block_index_++) {
    if (requested <= allocation_list_[current_allocation_block_index_].size) {
      return true;  // Found a large enough allocation block.
    }
  }
  current_allocation_block_index_ = 0;
  // Code range is full or too fragmented.
  return false;
}

Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                     const size_t commit_size,
                                     size_t* allocated) {
  // requested_size includes the guard pages while commit_size does not. Make
  // sure callers know about the invariant.
  CHECK_LE(commit_size,
           requested_size - 2 * MemoryAllocator::CodePageGuardSize());
  FreeBlock current;
  if (!ReserveBlock(requested_size, &current)) {
    *allocated = 0;
    return NULL;
  }
  *allocated = current.size;
  DCHECK(*allocated <= current.size);
  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
  if (!isolate_->heap()->memory_allocator()->CommitExecutableMemory(
          code_range_, current.start, commit_size, *allocated)) {
    *allocated = 0;
    ReleaseBlock(&current);
    return NULL;
  }
  return current.start;
}


bool CodeRange::CommitRawMemory(Address start, size_t length) {
  return isolate_->heap()->memory_allocator()->CommitMemory(start, length,
                                                            EXECUTABLE);
}


bool CodeRange::UncommitRawMemory(Address start, size_t length) {
  return code_range_->Uncommit(start, length);
}


void CodeRange::FreeRawMemory(Address address, size_t length) {
  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(FreeBlock(address, length));
  code_range_->Uncommit(address, length);
}

void CodeRange::TearDown() {
  delete code_range_;  // Frees all memory in the virtual memory range.
  code_range_ = NULL;
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Free();
  allocation_list_.Free();
}


bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  DCHECK(allocation_list_.length() == 0 ||
         current_allocation_block_index_ < allocation_list_.length());
  if (allocation_list_.length() == 0 ||
      requested_size > allocation_list_[current_allocation_block_index_].size) {
    // Find an allocation block large enough.
    if (!GetNextAllocationBlock(requested_size)) return false;
  }
  // Commit the requested memory at the start of the current allocation block.
  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
  *block = allocation_list_[current_allocation_block_index_];
  // Don't leave a small free block, useless for a large object or chunk.
  if (aligned_requested < (block->size - Page::kPageSize)) {
    block->size = aligned_requested;
  }
  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
  allocation_list_[current_allocation_block_index_].start += block->size;
  allocation_list_[current_allocation_block_index_].size -= block->size;
  return true;
}


void CodeRange::ReleaseBlock(const FreeBlock* block) {
  base::LockGuard<base::Mutex> guard(&code_range_mutex_);
  free_list_.Add(*block);
}


// -----------------------------------------------------------------------------
// MemoryAllocator
//

286MemoryAllocator::MemoryAllocator(Isolate* isolate)
287 : isolate_(isolate),
Ben Murdochc5610432016-08-08 18:44:38 +0100288 code_range_(nullptr),
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000289 capacity_(0),
290 capacity_executable_(0),
291 size_(0),
292 size_executable_(0),
293 lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
Ben Murdochc5610432016-08-08 18:44:38 +0100294 highest_ever_allocated_(reinterpret_cast<void*>(0)),
295 unmapper_(this) {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000296
Ben Murdochc5610432016-08-08 18:44:38 +0100297bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable,
298 intptr_t code_range_size) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000299 capacity_ = RoundUp(capacity, Page::kPageSize);
300 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
301 DCHECK_GE(capacity_, capacity_executable_);
302
303 size_ = 0;
304 size_executable_ = 0;
305
Ben Murdochc5610432016-08-08 18:44:38 +0100306 code_range_ = new CodeRange(isolate_);
307 if (!code_range_->SetUp(static_cast<size_t>(code_range_size))) return false;
308
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000309 return true;
310}
311
312
313void MemoryAllocator::TearDown() {
Ben Murdochc5610432016-08-08 18:44:38 +0100314 unmapper()->WaitUntilCompleted();
315
316 MemoryChunk* chunk = nullptr;
317 while ((chunk = unmapper()->TryGetPooledMemoryChunkSafe()) != nullptr) {
Ben Murdochda12d292016-06-02 14:46:10 +0100318 FreeMemory(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize,
319 NOT_EXECUTABLE);
320 }
Ben Murdochc5610432016-08-08 18:44:38 +0100321
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000322 // Check that spaces were torn down before MemoryAllocator.
Ben Murdochda12d292016-06-02 14:46:10 +0100323 DCHECK_EQ(size_.Value(), 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000324 // TODO(gc) this will be true again when we fix FreeMemory.
325 // DCHECK(size_executable_ == 0);
326 capacity_ = 0;
327 capacity_executable_ = 0;
Ben Murdochc5610432016-08-08 18:44:38 +0100328
329 if (last_chunk_.IsReserved()) {
330 last_chunk_.Release();
331 }
332
333 delete code_range_;
334 code_range_ = nullptr;
335}
336
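// Background task used by the Unmapper to release queued chunks off the main
// thread. It signals the semaphore when done so WaitUntilCompleted() can join.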
class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public v8::Task {
 public:
  explicit UnmapFreeMemoryTask(Unmapper* unmapper) : unmapper_(unmapper) {}

 private:
  // v8::Task overrides.
  void Run() override {
    unmapper_->PerformFreeMemoryOnQueuedChunks();
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
  }

  Unmapper* unmapper_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};

void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  ReconsiderDelayedChunks();
  if (FLAG_concurrent_sweeping) {
    V8::GetCurrentPlatform()->CallOnBackgroundThread(
        new UnmapFreeMemoryTask(this), v8::Platform::kShortRunningTask);
    concurrent_unmapping_tasks_active_++;
  } else {
    PerformFreeMemoryOnQueuedChunks();
  }
}

bool MemoryAllocator::Unmapper::WaitUntilCompleted() {
  bool waited = false;
  while (concurrent_unmapping_tasks_active_ > 0) {
    pending_unmapping_tasks_semaphore_.Wait();
    concurrent_unmapping_tasks_active_--;
    waited = true;
  }
  return waited;
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  // Non-regular chunks.
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}

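// Chunks whose freeing had to be delayed (see CanFreeMemoryChunk) are pushed
// back through AddMemoryChunkSafe so they can be reconsidered for unmapping.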
void MemoryAllocator::Unmapper::ReconsiderDelayedChunks() {
  std::list<MemoryChunk*> delayed_chunks(std::move(delayed_regular_chunks_));
  // Move constructed, so the permanent list should be empty.
  DCHECK(delayed_regular_chunks_.empty());
  for (auto it = delayed_chunks.begin(); it != delayed_chunks.end(); ++it) {
    AddMemoryChunkSafe<kRegular>(*it);
  }
}

bool MemoryAllocator::CanFreeMemoryChunk(MemoryChunk* chunk) {
  MarkCompactCollector* mc = isolate_->heap()->mark_compact_collector();
  // We cannot free memory chunks in new space while the sweeper is running
  // since a sweeper thread might be stuck right before trying to lock the
  // corresponding page.
  return !chunk->InNewSpace() || (mc == nullptr) ||
         mc->sweeper().IsSweepingCompleted();
}

bool MemoryAllocator::CommitMemory(Address base, size_t size,
                                   Executability executable) {
  if (!base::VirtualMemory::CommitRegion(base, size,
                                         executable == EXECUTABLE)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}


void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  // Code which is part of the code-range does not have its own VirtualMemory.
  DCHECK(code_range() == NULL ||
         !code_range()->contains(static_cast<Address>(reservation->address())));
  DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid() ||
         reservation->size() <= Page::kPageSize);

  reservation->Release();
}


void MemoryAllocator::FreeMemory(Address base, size_t size,
                                 Executability executable) {
  // TODO(gc) make code_range part of memory allocator?
  if (code_range() != NULL &&
      code_range()->contains(static_cast<Address>(base))) {
    DCHECK(executable == EXECUTABLE);
    code_range()->FreeRawMemory(base, size);
  } else {
    DCHECK(executable == NOT_EXECUTABLE || !code_range()->valid());
    bool result = base::VirtualMemory::ReleaseRegion(base, size);
    USE(result);
    DCHECK(result);
  }
}

Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
                                              base::VirtualMemory* controller) {
  base::VirtualMemory reservation(size, alignment);

  if (!reservation.IsReserved()) return NULL;
  size_.Increment(static_cast<intptr_t>(reservation.size()));
  Address base =
      RoundUp(static_cast<Address>(reservation.address()), alignment);
  controller->TakeControl(&reservation);
  return base;
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, base::VirtualMemory* controller) {
  DCHECK(commit_size <= reserve_size);
  base::VirtualMemory reservation;
  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
  if (base == NULL) return NULL;

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = NULL;
    }
  } else {
    if (reservation.Commit(base, commit_size, false)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = NULL;
    }
  }

  if (base == NULL) {
    // Failed to commit the body. Release the mapping and any partially
    // committed regions inside it.
    reservation.Release();
    return NULL;
  }

  controller->TakeControl(&reservation);
  return base;
}

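// The anchor is a sentinel page that owns no memory; it links to itself so
// that a space's page list stays circular even when the space has no pages.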
void Page::InitializeAsAnchor(Space* space) {
  set_owner(space);
  set_next_chunk(this);
  set_prev_chunk(this);
  SetFlags(0, ~0);
  SetFlag(ANCHOR);
}

MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     base::VirtualMemory* reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = 0;
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  chunk->old_to_new_slots_ = nullptr;
  chunk->old_to_old_slots_ = nullptr;
  chunk->typed_old_to_new_slots_ = nullptr;
  chunk->typed_old_to_old_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
  chunk->mutex_ = new base::Mutex();
  chunk->available_in_free_list_ = 0;
  chunk->wasted_memory_ = 0;
  chunk->ResetLiveBytes();
  Bitmap::Clear(chunk);
  chunk->set_next_chunk(nullptr);
  chunk->set_prev_chunk(nullptr);
  chunk->local_tracker_ = nullptr;

  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
  }

  if (reservation != nullptr) {
    chunk->reservation_.TakeControl(reservation);
  }

  return chunk;
}

// Commit MemoryChunk area to the requested size.
bool MemoryChunk::CommitArea(size_t requested) {
  size_t guard_size =
      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
  size_t header_size = area_start() - address() - guard_size;
  size_t commit_size =
      RoundUp(header_size + requested, base::OS::CommitPageSize());
  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                  base::OS::CommitPageSize());

  if (commit_size > committed_size) {
    // Commit size should be less than or equal to the reserved size.
    DCHECK(commit_size <= size() - 2 * guard_size);
    // Append the committed area.
    Address start = address() + committed_size + guard_size;
    size_t length = commit_size - committed_size;
    if (reservation_.IsReserved()) {
      Executability executable =
          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
      if (!heap()->memory_allocator()->CommitMemory(start, length,
                                                    executable)) {
        return false;
      }
    } else {
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->CommitRawMemory(start, length)) return false;
    }

    if (Heap::ShouldZapGarbage()) {
      heap_->memory_allocator()->ZapBlock(start, length);
    }
  } else if (commit_size < committed_size) {
    DCHECK(commit_size > 0);
    // Shrink the committed area.
    size_t length = committed_size - commit_size;
    Address start = address() + committed_size + guard_size - length;
    if (reservation_.IsReserved()) {
      if (!reservation_.Uncommit(start, length)) return false;
    } else {
      CodeRange* code_range = heap_->memory_allocator()->code_range();
      DCHECK(code_range->valid() && IsFlagSet(IS_EXECUTABLE));
      if (!code_range->UncommitRawMemory(start, length)) return false;
    }
  }

  area_end_ = area_start_ + requested;
  return true;
}


void MemoryChunk::InsertAfter(MemoryChunk* other) {
  MemoryChunk* other_next = other->next_chunk();

  set_next_chunk(other_next);
  set_prev_chunk(other);
  other_next->set_prev_chunk(this);
  other->set_next_chunk(this);
}


void MemoryChunk::Unlink() {
  MemoryChunk* next_element = next_chunk();
  MemoryChunk* prev_element = prev_chunk();
  next_element->set_prev_chunk(prev_element);
  prev_element->set_next_chunk(next_element);
  set_prev_chunk(NULL);
  set_next_chunk(NULL);
}

MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                                            intptr_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK(commit_area_size <= reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = NULL;
  base::VirtualMemory reservation;
  Address area_start = NULL;
  Address area_end = NULL;

  //
  // MemoryChunk layout:
  //
  //             Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  //           Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
  // |           Area             |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //

  if (executable == EXECUTABLE) {
    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
                         base::OS::CommitPageSize()) +
                 CodePageGuardSize();

    // Check executable memory limit.
    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
        capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
    }

    // Size of header (not executable) plus area (executable).
    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
                                 base::OS::CommitPageSize());
    // Allocate executable memory either from code range or from the
    // OS.
#ifdef V8_TARGET_ARCH_MIPS64
    // Use code range only for large object space on mips64 to keep address
    // range within 256-MB memory region.
    if (code_range()->valid() && reserve_area_size > CodePageAreaSize()) {
#else
    if (code_range()->valid()) {
#endif
      base =
          code_range()->AllocateRawMemory(chunk_size, commit_size, &chunk_size);
      DCHECK(
          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
      if (base == NULL) return NULL;
      size_.Increment(static_cast<intptr_t>(chunk_size));
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
    } else {
      base = AllocateAlignedMemory(chunk_size, commit_size,
                                   MemoryChunk::kAlignment, executable,
                                   &reservation);
      if (base == NULL) return NULL;
      // Update executable memory size.
      size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
    }

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, CodePageGuardStartOffset());
      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
    }

    area_start = base + CodePageAreaStartOffset();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
                         base::OS::CommitPageSize());
    size_t commit_size =
        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
                base::OS::CommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, &reservation);

    if (base == NULL) return NULL;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
    }

    area_start = base + Page::kObjectStartOffset;
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics and callbacks because we assume that they
  // treat reserved but not-yet committed memory regions of chunks as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((reinterpret_cast<uintptr_t>(base) + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_.TakeControl(&reservation);
    UncommitBlock(reinterpret_cast<Address>(last_chunk_.address()),
                  last_chunk_.size());
    size_.Increment(-static_cast<intptr_t>(chunk_size));
    if (executable == EXECUTABLE) {
      size_executable_.Increment(-static_cast<intptr_t>(chunk_size));
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
  }

  return MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                 executable, owner, &reservation);
}


void Page::ResetFreeListStatistics() {
  wasted_memory_ = 0;
  available_in_free_list_ = 0;
}

void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  intptr_t size;
  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (reservation->IsReserved()) {
    size = static_cast<intptr_t>(reservation->size());
  } else {
    size = static_cast<intptr_t>(chunk->size());
  }
  DCHECK(size_.Value() >= size);
  size_.Increment(-size);
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

  if (chunk->executable() == EXECUTABLE) {
    DCHECK(size_executable_.Value() >= size);
    size_executable_.Increment(-size);
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);
}


void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  base::VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitBlock(reinterpret_cast<Address>(chunk), MemoryChunk::kPageSize);
  } else {
    if (reservation->IsReserved()) {
      FreeMemory(reservation, chunk->executable());
    } else {
      FreeMemory(chunk->address(), chunk->size(), chunk->executable());
    }
  }
}

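// kFull releases the chunk synchronously. kPreFreeAndQueue and
// kPooledAndQueue only pre-free here and hand the chunk to the Unmapper,
// which performs the actual release (or pooling) later, possibly on a
// background thread.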
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
    // Fall through to kPreFreeAndQueue.
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
    default:
      UNREACHABLE();
  }
}

template void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
    MemoryChunk* chunk);

template void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
    MemoryChunk* chunk);

template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(intptr_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    DCHECK_EQ(size, static_cast<intptr_t>(MemoryChunk::kAllocatableMemory));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}

template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    intptr_t size, PagedSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    intptr_t size, SemiSpace* owner, Executability executable);
template Page*
MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    intptr_t size, SemiSpace* owner, Executability executable);

LargePage* MemoryAllocator::AllocateLargePage(intptr_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable, owner);
}

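// Tries to reuse a pooled chunk from the Unmapper. Pooled chunks are still
// reserved but have been uncommitted, so the block is recommitted before the
// chunk is reinitialized.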
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start = start + MemoryChunk::kObjectStartOffset;
  const Address area_end = start + size;
  if (!CommitBlock(reinterpret_cast<Address>(chunk), size, NOT_EXECUTABLE)) {
    return nullptr;
  }
  base::VirtualMemory reservation(start, size);
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, &reservation);
  size_.Increment(size);
  return chunk;
}

bool MemoryAllocator::CommitBlock(Address start, size_t size,
                                  Executability executable) {
  if (!CommitMemory(start, size, executable)) return false;

  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size);
  }

  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}


bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}


void MemoryAllocator::ZapBlock(Address start, size_t size) {
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory::Address_at(start + s) = kZapValue;
  }
}

#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
  intptr_t size = Size();
  float pct = static_cast<float>(capacity_ - size) / capacity_;
  PrintF("  capacity: %" V8PRIdPTR ", used: %" V8PRIdPTR
         ", available: %%%d\n\n",
         capacity_, size, static_cast<int>(pct * 100));
}
#endif


int MemoryAllocator::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageGuardSize() {
  return static_cast<int>(base::OS::CommitPageSize());
}


int MemoryAllocator::CodePageAreaStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}


int MemoryAllocator::CodePageAreaEndOffset() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
}

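// Commits an executable chunk in pieces: a non-executable header, a guard
// page, the executable body, and a trailing guard page. On failure every
// piece committed so far is uncommitted again.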
bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
                                             Address start, size_t commit_size,
                                             size_t reserved_size) {
  // Commit page header (not executable).
  Address header = start;
  size_t header_size = CodePageGuardStartOffset();
  if (vm->Commit(header, header_size, false)) {
    // Create guard page after the header.
    if (vm->Guard(start + CodePageGuardStartOffset())) {
      // Commit page body (executable).
      Address body = start + CodePageAreaStartOffset();
      size_t body_size = commit_size - CodePageGuardStartOffset();
      if (vm->Commit(body, body_size, true)) {
        // Create guard page before the end.
        if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
          UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
                                                commit_size -
                                                CodePageGuardStartOffset());
          return true;
        }
        vm->Uncommit(body, body_size);
      }
    }
    vm->Uncommit(header, header_size);
  }
  return false;
}


// -----------------------------------------------------------------------------
// MemoryChunk implementation

void MemoryChunk::ReleaseAllocatedMemory() {
  if (skip_list_ != nullptr) {
    delete skip_list_;
    skip_list_ = nullptr;
  }
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (old_to_new_slots_ != nullptr) ReleaseOldToNewSlots();
  if (old_to_old_slots_ != nullptr) ReleaseOldToOldSlots();
  if (typed_old_to_new_slots_ != nullptr) ReleaseTypedOldToNewSlots();
  if (typed_old_to_old_slots_ != nullptr) ReleaseTypedOldToOldSlots();
  if (local_tracker_ != nullptr) ReleaseLocalTracker();
}

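// A SlotSet covers one page, so chunks that span multiple pages (e.g. large
// object pages) get an array with one SlotSet per covered page.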
static SlotSet* AllocateSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK(pages > 0);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

void MemoryChunk::AllocateOldToNewSlots() {
  DCHECK(nullptr == old_to_new_slots_);
  old_to_new_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToNewSlots() {
  delete[] old_to_new_slots_;
  old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateOldToOldSlots() {
  DCHECK(nullptr == old_to_old_slots_);
  old_to_old_slots_ = AllocateSlotSet(size_, address());
}

void MemoryChunk::ReleaseOldToOldSlots() {
  delete[] old_to_old_slots_;
  old_to_old_slots_ = nullptr;
}

void MemoryChunk::AllocateTypedOldToNewSlots() {
  DCHECK(nullptr == typed_old_to_new_slots_);
  typed_old_to_new_slots_ = new TypedSlotSet(address());
}

void MemoryChunk::ReleaseTypedOldToNewSlots() {
  delete typed_old_to_new_slots_;
  typed_old_to_new_slots_ = nullptr;
}

void MemoryChunk::AllocateTypedOldToOldSlots() {
  DCHECK(nullptr == typed_old_to_old_slots_);
  typed_old_to_old_slots_ = new TypedSlotSet(address());
}

void MemoryChunk::ReleaseTypedOldToOldSlots() {
  delete typed_old_to_old_slots_;
  typed_old_to_old_slots_ = nullptr;
}

void MemoryChunk::AllocateLocalTracker() {
  DCHECK_NULL(local_tracker_);
  local_tracker_ = new LocalArrayBufferTracker(heap());
}

void MemoryChunk::ReleaseLocalTracker() {
  DCHECK_NOT_NULL(local_tracker_);
  delete local_tracker_;
  local_tracker_ = nullptr;
}

// -----------------------------------------------------------------------------
// PagedSpace implementation

STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
              ObjectSpace::kObjectSpaceNewSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
              ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
              ObjectSpace::kObjectSpaceCodeSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
              ObjectSpace::kObjectSpaceMapSpace);

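// Notifies every registered AllocationObserver about an allocation of |size|
// bytes at |soon_object|, unless observers are currently paused.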
void Space::AllocationStep(Address soon_object, int size) {
  if (!allocation_observers_paused_) {
    for (int i = 0; i < allocation_observers_->length(); ++i) {
      AllocationObserver* o = (*allocation_observers_)[i];
      o->AllocationStep(size, soon_object, size);
    }
  }
}

PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : Space(heap, space, executable), anchor_(this), free_list_(this) {
  area_size_ = MemoryAllocator::PageAreaSize(space);
  accounting_stats_.Clear();

  allocation_info_.Reset(nullptr, nullptr);
}


bool PagedSpace::SetUp() { return true; }


bool PagedSpace::HasBeenSetUp() { return true; }


void PagedSpace::TearDown() {
  for (auto it = begin(); it != end();) {
    Page* page = *(it++);  // Will be erased.
    ArrayBufferTracker::FreeAll(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
  anchor_.set_next_page(&anchor_);
  anchor_.set_prev_page(&anchor_);
  accounting_stats_.Clear();
}

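// Moves pages that the concurrent sweeper has finished back into this space
// and accounts for their free memory. A compaction space may take over pages
// from the space it compacts for and stops once it has gathered
// kCompactionMemoryWanted bytes.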
void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  intptr_t added = 0;
  {
    Page* p = nullptr;
    while ((p = collector->sweeper().GetSweptPageSafe(this)) != nullptr) {
      // Pages can actually change ownership only during compaction. This is
      // safe because there exists no other competing action on the page links
      // during compaction.
      if (is_local() && (p->owner() != this)) {
        base::LockGuard<base::Mutex> guard(
            reinterpret_cast<PagedSpace*>(p->owner())->mutex());
        p->Unlink();
        p->set_owner(this);
        p->InsertAfter(anchor_.prev_page());
      }
      added += RelinkFreeListCategories(p);
      added += p->wasted_memory();
      if (is_local() && (added > kCompactionMemoryWanted)) break;
    }
  }
  accounting_stats_.IncreaseCapacity(added);
}

void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  DCHECK(identity() == other->identity());
  // Unmerged fields:
  //   area_size_
  //   anchor_

  other->EmptyAllocationInfo();

  // Update and clear accounting statistics.
  accounting_stats_.Merge(other->accounting_stats_);
  other->accounting_stats_.Clear();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK(other->top() == nullptr);
  DCHECK(other->limit() == nullptr);

  AccountCommitted(other->CommittedMemory());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);

    // Relinking requires the category to be unlinked.
    other->UnlinkFreeListCategories(p);

    p->Unlink();
    p->set_owner(this);
    p->InsertAfter(anchor_.prev_page());
    RelinkFreeListCategories(p);
  }
}

1170 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
1171 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1172 size_t size = 0;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001173 for (Page* page : *this) {
1174 size += page->CommittedPhysicalMemory();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001175 }
1176 return size;
1177}
1178
Ben Murdoch097c5b22016-05-18 11:27:45 +01001179bool PagedSpace::ContainsSlow(Address addr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001180 Page* p = Page::FromAddress(addr);
Ben Murdoch61f157c2016-09-16 13:49:30 +01001181 for (Page* page : *this) {
1182 if (page == p) return true;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001183 }
1184 return false;
1185}
1186
1187
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001188Object* PagedSpace::FindObject(Address addr) {
1189 // Note: this function can only be called on iterable spaces.
1190 DCHECK(!heap()->mark_compact_collector()->in_use());
1191
1192 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
1193
1194 Page* p = Page::FromAddress(addr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001195 HeapObjectIterator it(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001196 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
1197 Address cur = obj->address();
1198 Address next = cur + obj->Size();
1199 if ((cur <= addr) && (addr < next)) return obj;
1200 }
1201
1202 UNREACHABLE();
1203 return Smi::FromInt(0);
1204}
1205
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001206bool PagedSpace::Expand() {
Ben Murdochc5610432016-08-08 18:44:38 +01001207 int size = AreaSize();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001208 if (snapshotable() && !HasPages()) {
1209 size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001210 }
1211
Ben Murdochc5610432016-08-08 18:44:38 +01001212 if (!heap()->CanExpandOldGeneration(size)) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001213
Ben Murdochc5610432016-08-08 18:44:38 +01001214 Page* p = heap()->memory_allocator()->AllocatePage(size, this, executable());
1215 if (p == nullptr) return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001216
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001217 AccountCommitted(static_cast<intptr_t>(p->size()));
1218
1219 // Pages created during bootstrapping may contain immortal immovable objects.
1220 if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
1221
Ben Murdochda12d292016-06-02 14:46:10 +01001222 // When incremental marking was activated, old space pages are allocated
1223 // black.
1224 if (heap()->incremental_marking()->black_allocation() &&
1225 identity() == OLD_SPACE) {
1226 Bitmap::SetAllBits(p);
1227 p->SetFlag(Page::BLACK_PAGE);
1228 if (FLAG_trace_incremental_marking) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01001229 PrintIsolate(heap()->isolate(), "Added black page %p\n",
1230 static_cast<void*>(p));
Ben Murdochda12d292016-06-02 14:46:10 +01001231 }
1232 }
1233
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001234 DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001235
1236 p->InsertAfter(anchor_.prev_page());
1237
1238 return true;
1239}
1240
1241
int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* page : *this) {
    count++;
    USE(page);
  }
  return count;
}


void PagedSpace::ResetFreeListStatistics() {
  for (Page* page : *this) {
    page->ResetFreeListStatistics();
  }
}


void PagedSpace::IncreaseCapacity(int size) {
  accounting_stats_.ExpandSpace(size);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(page->LiveBytes(), 0);
  DCHECK_EQ(AreaSize(), page->area_size());
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    allocation_info_.Reset(nullptr, nullptr);
  }

  // If page is still in a list, unlink it from that list.
  if (page->next_chunk() != NULL) {
    DCHECK(page->prev_chunk() != NULL);
    page->Unlink();
  }

  AccountUncommitted(static_cast<intptr_t>(page->size()));
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);

  DCHECK(Capacity() > 0);
  accounting_stats_.ShrinkSpace(AreaSize());
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif

#ifdef VERIFY_HEAP
void PagedSpace::Verify(ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  for (Page* page : *this) {
    CHECK(page->owner() == this);
    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();
    int black_size = 0;
    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space.
      Map* map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify();

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map->instance_type(), size, visitor);
      if (!page->IsFlagSet(Page::BLACK_PAGE) &&
          Marking::IsBlack(Marking::MarkBitFrom(object))) {
        black_size += size;
      }

      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;
    }
    CHECK_LE(black_size, page->LiveBytes());
  }
  CHECK(allocation_pointer_found_in_space);
}
#endif  // VERIFY_HEAP

1338// -----------------------------------------------------------------------------
1339// NewSpace implementation
1340
Ben Murdochda12d292016-06-02 14:46:10 +01001341bool NewSpace::SetUp(int initial_semispace_capacity,
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001342 int maximum_semispace_capacity) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001343 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
1344 DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
1345
Ben Murdochda12d292016-06-02 14:46:10 +01001346 to_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1347 from_space_.SetUp(initial_semispace_capacity, maximum_semispace_capacity);
1348 if (!to_space_.Commit()) {
1349 return false;
1350 }
1351 DCHECK(!from_space_.is_committed()); // No need to use memory yet.
1352 ResetAllocationInfo();
1353
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001354 // Allocate and set up the histogram arrays if necessary.
1355 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1356 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001357#define SET_NAME(name) \
1358 allocated_histogram_[name].set_name(#name); \
1359 promoted_histogram_[name].set_name(#name);
1360 INSTANCE_TYPE_LIST(SET_NAME)
1361#undef SET_NAME
1362
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001363 return true;
1364}
1365
1366
1367void NewSpace::TearDown() {
1368 if (allocated_histogram_) {
1369 DeleteArray(allocated_histogram_);
1370 allocated_histogram_ = NULL;
1371 }
1372 if (promoted_histogram_) {
1373 DeleteArray(promoted_histogram_);
1374 promoted_histogram_ = NULL;
1375 }
1376
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001377 allocation_info_.Reset(nullptr, nullptr);
1378
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001379 to_space_.TearDown();
1380 from_space_.TearDown();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001381}
1382
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001383void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
1384
1385
1386void NewSpace::Grow() {
1387 // Double the semispace size but only up to maximum capacity.
1388 DCHECK(TotalCapacity() < MaximumCapacity());
1389 int new_capacity =
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001390 Min(MaximumCapacity(),
1391 FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001392 if (to_space_.GrowTo(new_capacity)) {
1393 // Only grow from space if we managed to grow to-space.
1394 if (!from_space_.GrowTo(new_capacity)) {
1395 // If we managed to grow to-space but couldn't grow from-space,
1396 // attempt to shrink to-space.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001397 if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001398 // We are in an inconsistent state because we could not
1399 // commit/uncommit memory from new space.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001400 CHECK(false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001401 }
1402 }
1403 }
1404 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1405}
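
// A worked example of the growth computation above (illustrative numbers
// only): with a 1 MB total capacity, a growth factor of 2 and an 8 MB
// maximum, new_capacity = Min(8 MB, 2 * 1 MB) = 2 MB, and both semispaces
// are grown to 2 MB. If from-space cannot be grown after to-space already
// succeeded, the code tries to shrink to-space back to from-space's current
// capacity so the two semispaces stay the same size; failing that it aborts
// with CHECK(false), since the semispaces would otherwise be permanently out
// of sync.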
1406
1407
1408void NewSpace::Shrink() {
1409 int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
1410 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
1411 if (rounded_new_capacity < TotalCapacity() &&
1412 to_space_.ShrinkTo(rounded_new_capacity)) {
1413 // Only shrink from-space if we managed to shrink to-space.
1414 from_space_.Reset();
1415 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1416 // If we managed to shrink to-space but couldn't shrink from
1417 // space, attempt to grow to-space again.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001418 if (!to_space_.GrowTo(from_space_.current_capacity())) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001419 // We are in an inconsistent state because we could not
1420 // commit/uncommit memory from new space.
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001421 CHECK(false);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001422 }
1423 }
1424 }
1425 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1426}
1427
Ben Murdoch61f157c2016-09-16 13:49:30 +01001428bool NewSpace::Rebalance() {
1429 CHECK(heap()->promotion_queue()->is_empty());
1430 // Order here is important to make use of the page pool.
1431 return to_space_.EnsureCurrentCapacity() &&
1432 from_space_.EnsureCurrentCapacity();
1433}
1434
1435bool SemiSpace::EnsureCurrentCapacity() {
1436 if (is_committed()) {
1437 const int expected_pages = current_capacity_ / Page::kPageSize;
1438 int actual_pages = 0;
1439 Page* current_page = anchor()->next_page();
1440 while (current_page != anchor()) {
1441 actual_pages++;
1442 current_page = current_page->next_page();
1443 if (actual_pages > expected_pages) {
1444 Page* to_remove = current_page->prev_page();
1445 // Make sure we don't overtake the actual top pointer.
1446 CHECK_NE(to_remove, current_page_);
1447 to_remove->Unlink();
1448 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
1449 to_remove);
1450 }
1451 }
1452 while (actual_pages < expected_pages) {
1453 actual_pages++;
1454 current_page =
1455 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1456 Page::kAllocatableMemory, this, executable());
1457 if (current_page == nullptr) return false;
1458 DCHECK_NOT_NULL(current_page);
1459 current_page->InsertAfter(anchor());
1460 Bitmap::Clear(current_page);
1461 current_page->SetFlags(anchor()->prev_page()->GetFlags(),
1462 Page::kCopyAllFlags);
1463 heap()->CreateFillerObjectAt(current_page->area_start(),
1464 current_page->area_size(),
1465 ClearRecordedSlots::kNo);
1466 }
1467 }
1468 return true;
1469}
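
// EnsureCurrentCapacity() above makes the page list match current_capacity_
// exactly: surplus pages beyond expected_pages are unlinked and returned via
// the pooled/queued free path, while missing pages are taken from the page
// pool, cleared, flagged like the existing pages and covered with a single
// filler object. NewSpace::Rebalance() relies on this to bring both
// semispaces back to the expected committed size.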
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001470
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001471void LocalAllocationBuffer::Close() {
1472 if (IsValid()) {
1473 heap_->CreateFillerObjectAt(
1474 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001475 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1476 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001477 }
1478}
1479
1480
1481LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
1482 AllocationInfo allocation_info)
1483 : heap_(heap), allocation_info_(allocation_info) {
1484 if (IsValid()) {
1485 heap_->CreateFillerObjectAt(
1486 allocation_info_.top(),
Ben Murdochda12d292016-06-02 14:46:10 +01001487 static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
1488 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001489 }
1490}
1491
1492
1493LocalAllocationBuffer::LocalAllocationBuffer(
1494 const LocalAllocationBuffer& other) {
1495 *this = other;
1496}
1497
1498
1499LocalAllocationBuffer& LocalAllocationBuffer::operator=(
1500 const LocalAllocationBuffer& other) {
1501 Close();
1502 heap_ = other.heap_;
1503 allocation_info_ = other.allocation_info_;
1504
1505  // This is needed since we (a) cannot yet use move-semantics, and (b) want
1506  // to make the class easy to use by value, and (c) implicitly call
1507  // {Close} upon copy.
1508 const_cast<LocalAllocationBuffer&>(other)
1509 .allocation_info_.Reset(nullptr, nullptr);
1510 return *this;
1511}
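
// The copy operations above deliberately behave like a move: assignment
// first calls Close() on the destination (turning any unused tail of its old
// area into a filler object), then takes over the source's allocation area
// and resets the source's allocation info to (nullptr, nullptr),
// invalidating it. This lets callers pass a LocalAllocationBuffer around by
// value while guaranteeing that exactly one instance owns the underlying
// linear area at any time.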
1512
1513
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001514void NewSpace::UpdateAllocationInfo() {
1515 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001516 allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001517 UpdateInlineAllocationLimit(0);
1518 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1519}
1520
1521
1522void NewSpace::ResetAllocationInfo() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001523 Address old_top = allocation_info_.top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001524 to_space_.Reset();
1525 UpdateAllocationInfo();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001526 // Clear all mark-bits in the to-space.
Ben Murdoch61f157c2016-09-16 13:49:30 +01001527 for (Page* p : to_space_) {
1528 Bitmap::Clear(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001529 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001530 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001531}
1532
1533
1534void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
1535 if (heap()->inline_allocation_disabled()) {
1536 // Lowest limit when linear allocation was disabled.
1537 Address high = to_space_.page_high();
1538 Address new_top = allocation_info_.top() + size_in_bytes;
1539 allocation_info_.set_limit(Min(new_top, high));
Ben Murdoch097c5b22016-05-18 11:27:45 +01001540 } else if (allocation_observers_paused_ || top_on_previous_step_ == 0) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001541 // Normal limit is the end of the current page.
1542 allocation_info_.set_limit(to_space_.page_high());
1543 } else {
1544 // Lower limit during incremental marking.
1545 Address high = to_space_.page_high();
1546 Address new_top = allocation_info_.top() + size_in_bytes;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001547 Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001548 allocation_info_.set_limit(Min(new_limit, high));
1549 }
1550 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1551}
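
// The limit above is computed for three distinct situations:
//   1. Inline allocation disabled: the limit is pulled down to
//      top + size_in_bytes (capped at the page end), so the very next bump
//      allocation takes the slow path again.
//   2. Observers paused or none registered (top_on_previous_step_ == 0): the
//      limit is simply the end of the current to-space page.
//   3. Observers active: the limit is lowered so that roughly
//      GetNextInlineAllocationStepSize() more bytes can be bump-allocated
//      before the slow path runs and notifies the observers.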
1552
1553
1554bool NewSpace::AddFreshPage() {
1555 Address top = allocation_info_.top();
Ben Murdochc5610432016-08-08 18:44:38 +01001556 DCHECK(!Page::IsAtObjectStart(top));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001557 if (!to_space_.AdvancePage()) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001558 // No more pages left to advance.
1559 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001560 }
1561
1562 // Clear remainder of current page.
Ben Murdochc5610432016-08-08 18:44:38 +01001563 Address limit = Page::FromAllocationAreaAddress(top)->area_end();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001564 if (heap()->gc_state() == Heap::SCAVENGE) {
1565 heap()->promotion_queue()->SetNewLimit(limit);
1566 }
1567
1568 int remaining_in_page = static_cast<int>(limit - top);
Ben Murdochda12d292016-06-02 14:46:10 +01001569 heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001570 UpdateAllocationInfo();
1571
1572 return true;
1573}
1574
1575
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001576bool NewSpace::AddFreshPageSynchronized() {
1577 base::LockGuard<base::Mutex> guard(&mutex_);
1578 return AddFreshPage();
1579}
1580
1581
1582bool NewSpace::EnsureAllocation(int size_in_bytes,
1583 AllocationAlignment alignment) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001584 Address old_top = allocation_info_.top();
1585 Address high = to_space_.page_high();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001586 int filler_size = Heap::GetFillToAlign(old_top, alignment);
1587 int aligned_size_in_bytes = size_in_bytes + filler_size;
1588
Ben Murdochc5610432016-08-08 18:44:38 +01001589 if (old_top + aligned_size_in_bytes > high) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001590 // Not enough room in the page, try to allocate a new one.
1591 if (!AddFreshPage()) {
1592 return false;
1593 }
1594
1595 InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
1596
1597 old_top = allocation_info_.top();
1598 high = to_space_.page_high();
1599 filler_size = Heap::GetFillToAlign(old_top, alignment);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001600 }
1601
Ben Murdochc5610432016-08-08 18:44:38 +01001602 DCHECK(old_top + aligned_size_in_bytes <= high);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001603
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001604 if (allocation_info_.limit() < high) {
1605 // Either the limit has been lowered because linear allocation was disabled
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001606 // or because incremental marking wants to get a chance to do a step,
1607    // or because the idle scavenge job wants to get a chance to post a task.
1608 // Set the new limit accordingly.
1609 Address new_top = old_top + aligned_size_in_bytes;
1610 Address soon_object = old_top + filler_size;
1611 InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
1612 UpdateInlineAllocationLimit(aligned_size_in_bytes);
1613 }
1614 return true;
1615}
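
// A small worked example for the alignment arithmetic above (illustrative,
// 32-bit layout assumed): requesting 24 bytes with kDoubleAligned alignment
// while top is only word-aligned makes GetFillToAlign() return kPointerSize
// (4), so aligned_size_in_bytes is 28. If those 28 bytes do not fit on the
// current page, a fresh page is requested and the filler size is recomputed
// for the new top before the final DCHECK.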
1616
1617
1618void NewSpace::StartNextInlineAllocationStep() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001619 if (!allocation_observers_paused_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001620 top_on_previous_step_ =
Ben Murdoch097c5b22016-05-18 11:27:45 +01001621 allocation_observers_->length() ? allocation_info_.top() : 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001622 UpdateInlineAllocationLimit(0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001623 }
1624}
1625
1626
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001627intptr_t NewSpace::GetNextInlineAllocationStepSize() {
1628 intptr_t next_step = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01001629 for (int i = 0; i < allocation_observers_->length(); ++i) {
1630 AllocationObserver* o = (*allocation_observers_)[i];
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001631 next_step = next_step ? Min(next_step, o->bytes_to_next_step())
1632 : o->bytes_to_next_step();
1633 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001634 DCHECK(allocation_observers_->length() == 0 || next_step != 0);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001635 return next_step;
1636}
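
// The step size above is the minimum over all registered observers, e.g. an
// observer waiting for 512 KB and another waiting for 64 KB yield a step of
// 64 KB, so the limit computed in UpdateInlineAllocationLimit() is lowered
// far enough that the nearest observer is guaranteed to be notified in time.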
1637
Ben Murdoch097c5b22016-05-18 11:27:45 +01001638void NewSpace::AddAllocationObserver(AllocationObserver* observer) {
1639 Space::AddAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001640 StartNextInlineAllocationStep();
1641}
1642
Ben Murdoch097c5b22016-05-18 11:27:45 +01001643void NewSpace::RemoveAllocationObserver(AllocationObserver* observer) {
1644 Space::RemoveAllocationObserver(observer);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001645 StartNextInlineAllocationStep();
1646}
1647
Ben Murdoch097c5b22016-05-18 11:27:45 +01001648void NewSpace::PauseAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001649 // Do a step to account for memory allocated so far.
1650 InlineAllocationStep(top(), top(), nullptr, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001651 Space::PauseAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001652 top_on_previous_step_ = 0;
1653 UpdateInlineAllocationLimit(0);
1654}
1655
Ben Murdoch097c5b22016-05-18 11:27:45 +01001656void NewSpace::ResumeAllocationObservers() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001657 DCHECK(top_on_previous_step_ == 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001658 Space::ResumeAllocationObservers();
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001659 StartNextInlineAllocationStep();
1660}
1661
1662
1663void NewSpace::InlineAllocationStep(Address top, Address new_top,
1664 Address soon_object, size_t size) {
1665 if (top_on_previous_step_) {
1666 int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001667 for (int i = 0; i < allocation_observers_->length(); ++i) {
1668 (*allocation_observers_)[i]->AllocationStep(bytes_allocated, soon_object,
1669 size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001670 }
1671 top_on_previous_step_ = new_top;
1672 }
1673}
1674
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001675#ifdef VERIFY_HEAP
1676// We do not use the SemiSpaceIterator because verification doesn't assume
1677// that it works (it depends on the invariants we are checking).
1678void NewSpace::Verify() {
1679 // The allocation pointer should be in the space or at the very end.
1680 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1681
1682 // There should be objects packed in from the low address up to the
1683 // allocation pointer.
1684 Address current = to_space_.first_page()->area_start();
1685 CHECK_EQ(current, to_space_.space_start());
1686
1687 while (current != top()) {
Ben Murdochc5610432016-08-08 18:44:38 +01001688 if (!Page::IsAlignedToPageSize(current)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001689 // The allocation pointer should not be in the middle of an object.
Ben Murdochc5610432016-08-08 18:44:38 +01001690 CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001691 current < top());
1692
1693 HeapObject* object = HeapObject::FromAddress(current);
1694
1695 // The first word should be a map, and we expect all map pointers to
1696 // be in map space.
1697 Map* map = object->map();
1698 CHECK(map->IsMap());
1699 CHECK(heap()->map_space()->Contains(map));
1700
1701 // The object should not be code or a map.
1702 CHECK(!object->IsMap());
Ben Murdochda12d292016-06-02 14:46:10 +01001703 CHECK(!object->IsAbstractCode());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001704
1705 // The object itself should look OK.
1706 object->ObjectVerify();
1707
1708 // All the interior pointers should be contained in the heap.
1709 VerifyPointersVisitor visitor;
1710 int size = object->Size();
1711 object->IterateBody(map->instance_type(), size, &visitor);
1712
1713 current += size;
1714 } else {
1715 // At end of page, switch to next page.
Ben Murdochc5610432016-08-08 18:44:38 +01001716 Page* page = Page::FromAllocationAreaAddress(current)->next_page();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001717 // Next page should be valid.
1718 CHECK(!page->is_anchor());
1719 current = page->area_start();
1720 }
1721 }
1722
1723 // Check semi-spaces.
1724 CHECK_EQ(from_space_.id(), kFromSpace);
1725 CHECK_EQ(to_space_.id(), kToSpace);
1726 from_space_.Verify();
1727 to_space_.Verify();
1728}
1729#endif
1730
1731// -----------------------------------------------------------------------------
1732// SemiSpace implementation
1733
Ben Murdochda12d292016-06-02 14:46:10 +01001734void SemiSpace::SetUp(int initial_capacity, int maximum_capacity) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001735 DCHECK_GE(maximum_capacity, Page::kPageSize);
1736 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1737 current_capacity_ = minimum_capacity_;
1738 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001739 committed_ = false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001740}
1741
1742
1743void SemiSpace::TearDown() {
Ben Murdochda12d292016-06-02 14:46:10 +01001744 // Properly uncommit memory to keep the allocator counters in sync.
Ben Murdoch61f157c2016-09-16 13:49:30 +01001745 if (is_committed()) {
1746 for (Page* p : *this) {
1747 ArrayBufferTracker::FreeAll(p);
1748 }
1749 Uncommit();
1750 }
Ben Murdochda12d292016-06-02 14:46:10 +01001751 current_capacity_ = maximum_capacity_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001752}
1753
1754
1755bool SemiSpace::Commit() {
1756 DCHECK(!is_committed());
Ben Murdochc5610432016-08-08 18:44:38 +01001757 Page* current = anchor();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001758 const int num_pages = current_capacity_ / Page::kPageSize;
Ben Murdochda12d292016-06-02 14:46:10 +01001759 for (int pages_added = 0; pages_added < num_pages; pages_added++) {
Ben Murdochc5610432016-08-08 18:44:38 +01001760 Page* new_page =
1761 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1762 Page::kAllocatableMemory, this, executable());
Ben Murdochda12d292016-06-02 14:46:10 +01001763 if (new_page == nullptr) {
1764 RewindPages(current, pages_added);
1765 return false;
1766 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001767 new_page->InsertAfter(current);
1768 current = new_page;
1769 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001770 Reset();
Ben Murdochda12d292016-06-02 14:46:10 +01001771 AccountCommitted(current_capacity_);
1772 if (age_mark_ == nullptr) {
1773 age_mark_ = first_page()->area_start();
1774 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001775 committed_ = true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001776 return true;
1777}
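
// Commit() above allocates current_capacity_ / Page::kPageSize pages through
// the pooled allocator path; if any single allocation fails, RewindPages()
// unlinks the pages added so far and the semispace stays uncommitted. Only
// when all pages are in place are the committed counters updated and the age
// mark initialized (if it was not yet set) to the start of the first page.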
1778
1779
1780bool SemiSpace::Uncommit() {
1781 DCHECK(is_committed());
Ben Murdoch61f157c2016-09-16 13:49:30 +01001782 for (auto it = begin(); it != end();) {
1783 Page* p = *(it++);
1784 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(p);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001785 }
1786 anchor()->set_next_page(anchor());
1787 anchor()->set_prev_page(anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001788 AccountUncommitted(current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001789 committed_ = false;
Ben Murdochc5610432016-08-08 18:44:38 +01001790 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001791 return true;
1792}
1793
1794
1795size_t SemiSpace::CommittedPhysicalMemory() {
1796 if (!is_committed()) return 0;
1797 size_t size = 0;
Ben Murdoch61f157c2016-09-16 13:49:30 +01001798 for (Page* p : *this) {
1799 size += p->CommittedPhysicalMemory();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001800 }
1801 return size;
1802}
1803
1804
1805bool SemiSpace::GrowTo(int new_capacity) {
1806 if (!is_committed()) {
1807 if (!Commit()) return false;
1808 }
Ben Murdochc5610432016-08-08 18:44:38 +01001809 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001810 DCHECK_LE(new_capacity, maximum_capacity_);
1811 DCHECK_GT(new_capacity, current_capacity_);
Ben Murdochda12d292016-06-02 14:46:10 +01001812 const int delta = new_capacity - current_capacity_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001813 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochc5610432016-08-08 18:44:38 +01001814 const int delta_pages = delta / Page::kPageSize;
1815 Page* last_page = anchor()->prev_page();
Ben Murdoch097c5b22016-05-18 11:27:45 +01001816 DCHECK_NE(last_page, anchor());
Ben Murdochda12d292016-06-02 14:46:10 +01001817 for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Ben Murdochc5610432016-08-08 18:44:38 +01001818 Page* new_page =
1819 heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
1820 Page::kAllocatableMemory, this, executable());
Ben Murdochda12d292016-06-02 14:46:10 +01001821 if (new_page == nullptr) {
1822 RewindPages(last_page, pages_added);
1823 return false;
1824 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001825 new_page->InsertAfter(last_page);
1826 Bitmap::Clear(new_page);
1827    // Duplicate the flags that were set on the old page.
Ben Murdochc5610432016-08-08 18:44:38 +01001828 new_page->SetFlags(last_page->GetFlags(), Page::kCopyOnFlipFlagsMask);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001829 last_page = new_page;
1830 }
Ben Murdochda12d292016-06-02 14:46:10 +01001831 AccountCommitted(static_cast<intptr_t>(delta));
1832 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001833 return true;
1834}
1835
Ben Murdochc5610432016-08-08 18:44:38 +01001836void SemiSpace::RewindPages(Page* start, int num_pages) {
1837 Page* new_last_page = nullptr;
1838 Page* last_page = start;
Ben Murdochda12d292016-06-02 14:46:10 +01001839 while (num_pages > 0) {
1840 DCHECK_NE(last_page, anchor());
1841 new_last_page = last_page->prev_page();
1842 last_page->prev_page()->set_next_page(last_page->next_page());
1843 last_page->next_page()->set_prev_page(last_page->prev_page());
1844 last_page = new_last_page;
1845 num_pages--;
1846 }
1847}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001848
1849bool SemiSpace::ShrinkTo(int new_capacity) {
Ben Murdochc5610432016-08-08 18:44:38 +01001850 DCHECK_EQ(new_capacity & Page::kPageAlignmentMask, 0);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001851 DCHECK_GE(new_capacity, minimum_capacity_);
1852 DCHECK_LT(new_capacity, current_capacity_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001853 if (is_committed()) {
Ben Murdochda12d292016-06-02 14:46:10 +01001854 const int delta = current_capacity_ - new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001855 DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
Ben Murdochc5610432016-08-08 18:44:38 +01001856 int delta_pages = delta / Page::kPageSize;
1857 Page* new_last_page;
1858 Page* last_page;
Ben Murdochda12d292016-06-02 14:46:10 +01001859 while (delta_pages > 0) {
1860 last_page = anchor()->prev_page();
1861 new_last_page = last_page->prev_page();
1862 new_last_page->set_next_page(anchor());
1863 anchor()->set_prev_page(new_last_page);
Ben Murdochc5610432016-08-08 18:44:38 +01001864 heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
Ben Murdochda12d292016-06-02 14:46:10 +01001865 last_page);
1866 delta_pages--;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001867 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001868 AccountUncommitted(static_cast<intptr_t>(delta));
Ben Murdochc5610432016-08-08 18:44:38 +01001869 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001870 }
Ben Murdochda12d292016-06-02 14:46:10 +01001871 current_capacity_ = new_capacity;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001872 return true;
1873}
1874
Ben Murdoch097c5b22016-05-18 11:27:45 +01001875void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001876 anchor_.set_owner(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001877 anchor_.prev_page()->set_next_page(&anchor_);
1878 anchor_.next_page()->set_prev_page(&anchor_);
1879
Ben Murdoch61f157c2016-09-16 13:49:30 +01001880 for (Page* page : *this) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001881 page->set_owner(this);
1882 page->SetFlags(flags, mask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001883 if (id_ == kToSpace) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001884 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1885 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1886 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1887 page->ResetLiveBytes();
1888 } else {
1889 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1890 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1891 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001892 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1893 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001894 }
1895}
1896
1897
1898void SemiSpace::Reset() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001899 DCHECK_NE(anchor_.next_page(), &anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001900 current_page_ = anchor_.next_page();
Ben Murdoch61f157c2016-09-16 13:49:30 +01001901 pages_used_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001902}
1903
Ben Murdoch61f157c2016-09-16 13:49:30 +01001904void SemiSpace::RemovePage(Page* page) {
1905 if (current_page_ == page) {
1906 current_page_ = page->prev_page();
1907 }
1908 page->Unlink();
1909}
1910
1911void SemiSpace::PrependPage(Page* page) {
1912 page->SetFlags(current_page()->GetFlags(), Page::kCopyAllFlags);
1913 page->set_owner(this);
1914 page->InsertAfter(anchor());
1915 pages_used_++;
Ben Murdochc5610432016-08-08 18:44:38 +01001916}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001917
1918void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1919 // We won't be swapping semispaces without data in them.
Ben Murdoch097c5b22016-05-18 11:27:45 +01001920 DCHECK_NE(from->anchor_.next_page(), &from->anchor_);
1921 DCHECK_NE(to->anchor_.next_page(), &to->anchor_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001922
Ben Murdoch097c5b22016-05-18 11:27:45 +01001923 intptr_t saved_to_space_flags = to->current_page()->GetFlags();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001924
Ben Murdoch097c5b22016-05-18 11:27:45 +01001925 // We swap all properties but id_.
1926 std::swap(from->current_capacity_, to->current_capacity_);
1927 std::swap(from->maximum_capacity_, to->maximum_capacity_);
1928 std::swap(from->minimum_capacity_, to->minimum_capacity_);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001929 std::swap(from->age_mark_, to->age_mark_);
1930 std::swap(from->committed_, to->committed_);
1931 std::swap(from->anchor_, to->anchor_);
1932 std::swap(from->current_page_, to->current_page_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001933
Ben Murdochc5610432016-08-08 18:44:38 +01001934 to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
Ben Murdoch097c5b22016-05-18 11:27:45 +01001935 from->FixPagesFlags(0, 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001936}
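
// Swap() above exchanges everything except id_, so after a flip from_space_
// describes the pages that used to be to-space and vice versa.
// FixPagesFlags() then re-stamps ownership and the IN_TO_SPACE/IN_FROM_SPACE
// bits on every page; the new to-space additionally inherits the
// flip-relevant flags saved from the previous to-space's current page.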
1937
1938
1939void SemiSpace::set_age_mark(Address mark) {
Ben Murdochc5610432016-08-08 18:44:38 +01001940 DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001941 age_mark_ = mark;
1942 // Mark all pages up to the one containing mark.
Ben Murdoch61f157c2016-09-16 13:49:30 +01001943 for (Page* p : NewSpacePageRange(space_start(), mark)) {
1944 p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001945 }
1946}
1947
1948
1949#ifdef DEBUG
1950void SemiSpace::Print() {}
1951#endif
1952
1953#ifdef VERIFY_HEAP
1954void SemiSpace::Verify() {
1955 bool is_from_space = (id_ == kFromSpace);
Ben Murdochc5610432016-08-08 18:44:38 +01001956 Page* page = anchor_.next_page();
1957 CHECK(anchor_.owner() == this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001958 while (page != &anchor_) {
Ben Murdochc5610432016-08-08 18:44:38 +01001959 CHECK_EQ(page->owner(), this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001960 CHECK(page->InNewSpace());
1961 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1962 : MemoryChunk::IN_TO_SPACE));
1963 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1964 : MemoryChunk::IN_FROM_SPACE));
1965 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1966 if (!is_from_space) {
1967 // The pointers-from-here-are-interesting flag isn't updated dynamically
1968 // on from-space pages, so it might be out of sync with the marking state.
1969 if (page->heap()->incremental_marking()->IsMarking()) {
1970 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1971 } else {
1972 CHECK(
1973 !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1974 }
1975 // TODO(gc): Check that the live_bytes_count_ field matches the
1976 // black marking on the page (if we make it match in new-space).
1977 }
Ben Murdoch097c5b22016-05-18 11:27:45 +01001978 CHECK_EQ(page->prev_page()->next_page(), page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001979 page = page->next_page();
1980 }
1981}
1982#endif
1983
1984#ifdef DEBUG
1985void SemiSpace::AssertValidRange(Address start, Address end) {
1986  // Addresses belong to the same semi-space.
Ben Murdochc5610432016-08-08 18:44:38 +01001987 Page* page = Page::FromAllocationAreaAddress(start);
1988 Page* end_page = Page::FromAllocationAreaAddress(end);
1989 SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
1990 CHECK_EQ(space, end_page->owner());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001991 // Start address is before end address, either on same page,
1992 // or end address is on a later page in the linked list of
1993 // semi-space pages.
1994 if (page == end_page) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01001995 CHECK_LE(start, end);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001996 } else {
1997 while (page != end_page) {
1998 page = page->next_page();
1999 CHECK_NE(page, space->anchor());
2000 }
2001 }
2002}
2003#endif
2004
2005
2006// -----------------------------------------------------------------------------
2007// SemiSpaceIterator implementation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002008
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002009SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002010 Initialize(space->bottom(), space->top());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002011}
2012
2013
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002014void SemiSpaceIterator::Initialize(Address start, Address end) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002015 SemiSpace::AssertValidRange(start, end);
2016 current_ = start;
2017 limit_ = end;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002018}
2019
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002020#ifdef DEBUG
2021// heap_histograms is shared, always clear it before using it.
2022static void ClearHistograms(Isolate* isolate) {
2023// We reset the name each time, though it hasn't changed.
2024#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
2025 INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
2026#undef DEF_TYPE_NAME
2027
2028#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
2029 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
2030#undef CLEAR_HISTOGRAM
2031
2032 isolate->js_spill_information()->Clear();
2033}
2034
2035
2036static void ClearCodeKindStatistics(int* code_kind_statistics) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01002037 for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002038 code_kind_statistics[i] = 0;
2039 }
2040}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002041static void ReportCodeKindStatistics(int* code_kind_statistics) {
2042 PrintF("\n Code kind histograms: \n");
Ben Murdoch61f157c2016-09-16 13:49:30 +01002043 for (int i = 0; i < AbstractCode::NUMBER_OF_KINDS; i++) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002044 if (code_kind_statistics[i] > 0) {
2045 PrintF(" %-20s: %10d bytes\n",
Ben Murdoch61f157c2016-09-16 13:49:30 +01002046 AbstractCode::Kind2String(static_cast<AbstractCode::Kind>(i)),
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002047 code_kind_statistics[i]);
2048 }
2049 }
2050 PrintF("\n");
2051}
2052
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002053static int CollectHistogramInfo(HeapObject* obj) {
2054 Isolate* isolate = obj->GetIsolate();
2055 InstanceType type = obj->map()->instance_type();
2056 DCHECK(0 <= type && type <= LAST_TYPE);
2057 DCHECK(isolate->heap_histograms()[type].name() != NULL);
2058 isolate->heap_histograms()[type].increment_number(1);
2059 isolate->heap_histograms()[type].increment_bytes(obj->Size());
2060
2061 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
2062 JSObject::cast(obj)
2063 ->IncrementSpillStatistics(isolate->js_spill_information());
2064 }
2065
2066 return obj->Size();
2067}
2068
2069
2070static void ReportHistogram(Isolate* isolate, bool print_spill) {
2071 PrintF("\n Object Histogram:\n");
2072 for (int i = 0; i <= LAST_TYPE; i++) {
2073 if (isolate->heap_histograms()[i].number() > 0) {
2074 PrintF(" %-34s%10d (%10d bytes)\n",
2075 isolate->heap_histograms()[i].name(),
2076 isolate->heap_histograms()[i].number(),
2077 isolate->heap_histograms()[i].bytes());
2078 }
2079 }
2080 PrintF("\n");
2081
2082 // Summarize string types.
2083 int string_number = 0;
2084 int string_bytes = 0;
2085#define INCREMENT(type, size, name, camel_name) \
2086 string_number += isolate->heap_histograms()[type].number(); \
2087 string_bytes += isolate->heap_histograms()[type].bytes();
2088 STRING_TYPE_LIST(INCREMENT)
2089#undef INCREMENT
2090 if (string_number > 0) {
2091 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
2092 string_bytes);
2093 }
2094
2095 if (FLAG_collect_heap_spill_statistics && print_spill) {
2096 isolate->js_spill_information()->Print();
2097 }
2098}
2099#endif // DEBUG
2100
2101
2102// Support for statistics gathering for --heap-stats and --log-gc.
2103void NewSpace::ClearHistograms() {
2104 for (int i = 0; i <= LAST_TYPE; i++) {
2105 allocated_histogram_[i].clear();
2106 promoted_histogram_[i].clear();
2107 }
2108}
2109
2110
2111// Because the copying collector does not touch garbage objects, we iterate
2112// the new space before a collection to get a histogram of allocated objects.
2113// This only happens when the --log-gc flag is set.
2114void NewSpace::CollectStatistics() {
2115 ClearHistograms();
2116 SemiSpaceIterator it(this);
2117 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
2118 RecordAllocation(obj);
2119}
2120
2121
2122static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
2123 const char* description) {
2124 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
2125 // Lump all the string types together.
2126 int string_number = 0;
2127 int string_bytes = 0;
2128#define INCREMENT(type, size, name, camel_name) \
2129 string_number += info[type].number(); \
2130 string_bytes += info[type].bytes();
2131 STRING_TYPE_LIST(INCREMENT)
2132#undef INCREMENT
2133 if (string_number > 0) {
2134 LOG(isolate,
2135 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
2136 }
2137
2138 // Then do the other types.
2139 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
2140 if (info[i].number() > 0) {
2141 LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
2142 info[i].bytes()));
2143 }
2144 }
2145 LOG(isolate, HeapSampleEndEvent("NewSpace", description));
2146}
2147
2148
2149void NewSpace::ReportStatistics() {
2150#ifdef DEBUG
2151 if (FLAG_heap_stats) {
2152 float pct = static_cast<float>(Available()) / TotalCapacity();
Ben Murdochc5610432016-08-08 18:44:38 +01002153 PrintF(" capacity: %" V8PRIdPTR ", available: %" V8PRIdPTR ", %%%d\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002154 TotalCapacity(), Available(), static_cast<int>(pct * 100));
2155 PrintF("\n Object Histogram:\n");
2156 for (int i = 0; i <= LAST_TYPE; i++) {
2157 if (allocated_histogram_[i].number() > 0) {
2158 PrintF(" %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
2159 allocated_histogram_[i].number(),
2160 allocated_histogram_[i].bytes());
2161 }
2162 }
2163 PrintF("\n");
2164 }
2165#endif // DEBUG
2166
2167 if (FLAG_log_gc) {
2168 Isolate* isolate = heap()->isolate();
2169 DoReportStatistics(isolate, allocated_histogram_, "allocated");
2170 DoReportStatistics(isolate, promoted_histogram_, "promoted");
2171 }
2172}
2173
2174
2175void NewSpace::RecordAllocation(HeapObject* obj) {
2176 InstanceType type = obj->map()->instance_type();
2177 DCHECK(0 <= type && type <= LAST_TYPE);
2178 allocated_histogram_[type].increment_number(1);
2179 allocated_histogram_[type].increment_bytes(obj->Size());
2180}
2181
2182
2183void NewSpace::RecordPromotion(HeapObject* obj) {
2184 InstanceType type = obj->map()->instance_type();
2185 DCHECK(0 <= type && type <= LAST_TYPE);
2186 promoted_histogram_[type].increment_number(1);
2187 promoted_histogram_[type].increment_bytes(obj->Size());
2188}
2189
2190
2191size_t NewSpace::CommittedPhysicalMemory() {
2192 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
2193 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2194 size_t size = to_space_.CommittedPhysicalMemory();
2195 if (from_space_.is_committed()) {
2196 size += from_space_.CommittedPhysicalMemory();
2197 }
2198 return size;
2199}
2200
2201
2202// -----------------------------------------------------------------------------
2203// Free lists for old object spaces implementation
2204
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002205
2206void FreeListCategory::Reset() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002207 set_top(nullptr);
Ben Murdochda12d292016-06-02 14:46:10 +01002208 set_prev(nullptr);
2209 set_next(nullptr);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002210 available_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002211}
2212
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002213FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002214 DCHECK(page()->CanAllocate());
2215
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002216 FreeSpace* node = top();
2217 if (node == nullptr) return nullptr;
Ben Murdochda12d292016-06-02 14:46:10 +01002218 set_top(node->next());
2219 *node_size = node->Size();
2220 available_ -= *node_size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002221 return node;
2222}
2223
Ben Murdochda12d292016-06-02 14:46:10 +01002224FreeSpace* FreeListCategory::TryPickNodeFromList(int minimum_size,
2225 int* node_size) {
2226 DCHECK(page()->CanAllocate());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002227
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002228 FreeSpace* node = PickNodeFromList(node_size);
Ben Murdochda12d292016-06-02 14:46:10 +01002229 if ((node != nullptr) && (*node_size < minimum_size)) {
2230 Free(node, *node_size, kLinkCategory);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002231 *node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002232 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002233 }
2234 return node;
2235}
2236
Ben Murdochda12d292016-06-02 14:46:10 +01002237FreeSpace* FreeListCategory::SearchForNodeInList(int minimum_size,
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002238 int* node_size) {
Ben Murdochda12d292016-06-02 14:46:10 +01002239 DCHECK(page()->CanAllocate());
2240
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002241 FreeSpace* prev_non_evac_node = nullptr;
2242 for (FreeSpace* cur_node = top(); cur_node != nullptr;
2243 cur_node = cur_node->next()) {
2244 int size = cur_node->size();
Ben Murdochda12d292016-06-02 14:46:10 +01002245 if (size >= minimum_size) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002246 available_ -= size;
2247 if (cur_node == top()) {
2248 set_top(cur_node->next());
2249 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002250 if (prev_non_evac_node != nullptr) {
2251 prev_non_evac_node->set_next(cur_node->next());
2252 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002253 *node_size = size;
2254 return cur_node;
2255 }
2256
2257 prev_non_evac_node = cur_node;
2258 }
2259 return nullptr;
2260}
2261
Ben Murdochda12d292016-06-02 14:46:10 +01002262bool FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes,
2263 FreeMode mode) {
2264 if (!page()->CanAllocate()) return false;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002265
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002266 free_space->set_next(top());
2267 set_top(free_space);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002268 available_ += size_in_bytes;
Ben Murdochda12d292016-06-02 14:46:10 +01002269 if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
2270 owner()->AddCategory(this);
2271 }
2272 return true;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002273}
2274
2275
2276void FreeListCategory::RepairFreeList(Heap* heap) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002277 FreeSpace* n = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002278 while (n != NULL) {
2279 Map** map_location = reinterpret_cast<Map**>(n->address());
2280 if (*map_location == NULL) {
2281 *map_location = heap->free_space_map();
2282 } else {
2283 DCHECK(*map_location == heap->free_space_map());
2284 }
2285 n = n->next();
2286 }
2287}
2288
Ben Murdochda12d292016-06-02 14:46:10 +01002289void FreeListCategory::Relink() {
2290 DCHECK(!is_linked());
2291 owner()->AddCategory(this);
2292}
2293
2294void FreeListCategory::Invalidate() {
2295 page()->add_available_in_free_list(-available());
2296 Reset();
2297 type_ = kInvalidCategory;
2298}
2299
Ben Murdoch097c5b22016-05-18 11:27:45 +01002300FreeList::FreeList(PagedSpace* owner) : owner_(owner), wasted_bytes_(0) {
2301 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002302 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002303 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002304 Reset();
2305}
2306
2307
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002308void FreeList::Reset() {
Ben Murdochda12d292016-06-02 14:46:10 +01002309 ForAllFreeListCategories(
2310 [](FreeListCategory* category) { category->Reset(); });
Ben Murdoch097c5b22016-05-18 11:27:45 +01002311 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002312 categories_[i] = nullptr;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002313 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002314 ResetStats();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002315}
2316
Ben Murdochda12d292016-06-02 14:46:10 +01002317int FreeList::Free(Address start, int size_in_bytes, FreeMode mode) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002318 if (size_in_bytes == 0) return 0;
2319
Ben Murdochda12d292016-06-02 14:46:10 +01002320 owner()->heap()->CreateFillerObjectAt(start, size_in_bytes,
2321 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002322
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002323 Page* page = Page::FromAddress(start);
2324
Ben Murdochda12d292016-06-02 14:46:10 +01002325 // Blocks have to be a minimum size to hold free list items.
2326 if (size_in_bytes < kMinBlockSize) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002327 page->add_wasted_memory(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002328 wasted_bytes_.Increment(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002329 return size_in_bytes;
2330 }
2331
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002332 FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002333 // Insert other blocks at the head of a free list of the appropriate
2334 // magnitude.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002335 FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002336 if (page->free_list_category(type)->Free(free_space, size_in_bytes, mode)) {
2337 page->add_available_in_free_list(size_in_bytes);
2338 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002339 return 0;
2340}
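
// Behaviour of Free() above for the two size classes (illustrative numbers,
// assuming kMinBlockSize is larger than a couple of words):
//   - An 8-byte leftover is too small to carry a free-list node, so it is
//     recorded as wasted memory on the page and the full 8 bytes are
//     returned to the caller as "not actually freed".
//   - A 256-byte block is turned into a FreeSpace object, pushed onto the
//     page-local category selected by SelectFreeListCategoryType(), and 0 is
//     returned because the whole block is reusable.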
2341
Ben Murdochda12d292016-06-02 14:46:10 +01002342FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, int* node_size) {
2343 FreeListCategoryIterator it(this, type);
2344 FreeSpace* node = nullptr;
2345 while (it.HasNext()) {
2346 FreeListCategory* current = it.Next();
2347 node = current->PickNodeFromList(node_size);
2348 if (node != nullptr) {
2349 Page::FromAddress(node->address())
2350 ->add_available_in_free_list(-(*node_size));
2351 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2352 return node;
2353 }
2354 RemoveCategory(current);
2355 }
2356 return node;
2357}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002358
Ben Murdochda12d292016-06-02 14:46:10 +01002359FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type, int* node_size,
2360 int minimum_size) {
2361 if (categories_[type] == nullptr) return nullptr;
2362 FreeSpace* node =
2363 categories_[type]->TryPickNodeFromList(minimum_size, node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002364 if (node != nullptr) {
2365 Page::FromAddress(node->address())
Ben Murdoch097c5b22016-05-18 11:27:45 +01002366 ->add_available_in_free_list(-(*node_size));
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002367 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2368 }
2369 return node;
2370}
2371
Ben Murdochda12d292016-06-02 14:46:10 +01002372FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
2373 int* node_size, int minimum_size) {
2374 FreeListCategoryIterator it(this, type);
2375 FreeSpace* node = nullptr;
2376 while (it.HasNext()) {
2377 FreeListCategory* current = it.Next();
2378 node = current->SearchForNodeInList(minimum_size, node_size);
2379 if (node != nullptr) {
2380 Page::FromAddress(node->address())
2381 ->add_available_in_free_list(-(*node_size));
2382 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2383 return node;
2384 }
2385 }
2386 return node;
2387}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002388
2389FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
2390 FreeSpace* node = nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002391
Ben Murdoch097c5b22016-05-18 11:27:45 +01002392 // First try the allocation fast path: try to allocate the minimum element
2393 // size of a free list category. This operation is constant time.
2394 FreeListCategoryType type =
2395 SelectFastAllocationFreeListCategoryType(size_in_bytes);
2396 for (int i = type; i < kHuge; i++) {
2397 node = FindNodeIn(static_cast<FreeListCategoryType>(i), node_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002398 if (node != nullptr) return node;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002399 }
2400
Ben Murdoch097c5b22016-05-18 11:27:45 +01002401 // Next search the huge list for free list nodes. This takes linear time in
2402 // the number of huge elements.
Ben Murdochda12d292016-06-02 14:46:10 +01002403 node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002404 if (node != nullptr) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002405 DCHECK(IsVeryLong() || Available() == SumFreeLists());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002406 return node;
2407 }
2408
Ben Murdoch097c5b22016-05-18 11:27:45 +01002409 // We need a huge block of memory, but we didn't find anything in the huge
2410 // list.
2411 if (type == kHuge) return nullptr;
2412
2413 // Now search the best fitting free list for a node that has at least the
Ben Murdochda12d292016-06-02 14:46:10 +01002414 // requested size.
Ben Murdoch097c5b22016-05-18 11:27:45 +01002415 type = SelectFreeListCategoryType(size_in_bytes);
Ben Murdochda12d292016-06-02 14:46:10 +01002416 node = TryFindNodeIn(type, node_size, size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002417
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002418 DCHECK(IsVeryLong() || Available() == SumFreeLists());
2419 return node;
2420}
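
// Summary of the search strategy above: (1) constant-time pops from the
// categories whose minimum element size already covers size_in_bytes,
// starting at the category picked by
// SelectFastAllocationFreeListCategoryType(); (2) a linear scan of the huge
// list for any node of at least size_in_bytes; (3) as a last resort, the top
// node of the category that size_in_bytes itself falls into is tried and
// only used if it happens to be large enough, otherwise it is put back and
// FindNodeFor() returns nullptr.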
2421
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002422// Allocation on the old space free list. If it succeeds then a new linear
2423// allocation space has been set up with the top and limit of the space. If
2424// the allocation fails then NULL is returned, and the caller can perform a GC
2425// or allocate a new page before retrying.
2426HeapObject* FreeList::Allocate(int size_in_bytes) {
2427 DCHECK(0 < size_in_bytes);
2428 DCHECK(size_in_bytes <= kMaxBlockSize);
2429 DCHECK(IsAligned(size_in_bytes, kPointerSize));
2430 // Don't free list allocate if there is linear space available.
2431 DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
2432
2433 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
2434 // Mark the old linear allocation area with a free space map so it can be
2435 // skipped when scanning the heap. This also puts it back in the free list
2436 // if it is big enough.
2437 owner_->Free(owner_->top(), old_linear_size);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002438 owner_->SetTopAndLimit(nullptr, nullptr);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002439
2440 owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
2441 old_linear_size);
2442
2443 int new_node_size = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002444 FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
2445 if (new_node == nullptr) return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002446
2447 int bytes_left = new_node_size - size_in_bytes;
2448 DCHECK(bytes_left >= 0);
2449
2450#ifdef DEBUG
2451 for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
2452 reinterpret_cast<Object**>(new_node->address())[i] =
2453 Smi::FromInt(kCodeZapValue);
2454 }
2455#endif
2456
2457 // The old-space-step might have finished sweeping and restarted marking.
2458 // Verify that it did not turn the page of the new node into an evacuation
2459 // candidate.
2460 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
2461
2462 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
2463
2464 // Memory in the linear allocation area is counted as allocated. We may free
2465 // a little of this again immediately - see below.
2466 owner_->Allocate(new_node_size);
2467
2468 if (owner_->heap()->inline_allocation_disabled()) {
2469 // Keep the linear allocation area empty if requested to do so, just
2470 // return area back to the free list instead.
2471 owner_->Free(new_node->address() + size_in_bytes, bytes_left);
Ben Murdochc5610432016-08-08 18:44:38 +01002472 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2473 new_node->address() + size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002474 } else if (bytes_left > kThreshold &&
2475 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002476 FLAG_incremental_marking) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002477 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
2478 // We don't want to give too large linear areas to the allocator while
2479 // incremental marking is going on, because we won't check again whether
2480 // we want to do another increment until the linear area is used up.
2481 owner_->Free(new_node->address() + size_in_bytes + linear_size,
2482 new_node_size - size_in_bytes - linear_size);
2483 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2484 new_node->address() + size_in_bytes + linear_size);
Ben Murdochc5610432016-08-08 18:44:38 +01002485 } else {
2486 DCHECK(bytes_left >= 0);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002487 // Normally we give the rest of the node to the allocator as its new
2488 // linear allocation area.
2489 owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
2490 new_node->address() + new_node_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002491 }
2492
Ben Murdochc5610432016-08-08 18:44:38 +01002493 owner_->AllocationStep(new_node->address(), size_in_bytes);
2494
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002495 return new_node;
2496}
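
// The remainder (bytes_left) of the node found above is handed out in one of
// three ways:
//   - inline allocation disabled: the whole tail is freed again and top ==
//     limit, so every subsequent allocation goes through the free list;
//   - incremental marking incomplete and the tail larger than kThreshold:
//     only a kThreshold-sized linear area (rounded to object alignment) is
//     kept and the rest is freed, so marking gets a chance to step soon;
//   - otherwise: the entire tail becomes the new linear allocation area.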
2497
Ben Murdochda12d292016-06-02 14:46:10 +01002498intptr_t FreeList::EvictFreeListItems(Page* page) {
2499 intptr_t sum = 0;
2500 page->ForAllFreeListCategories(
2501 [this, &sum, page](FreeListCategory* category) {
2502 DCHECK_EQ(this, category->owner());
2503 sum += category->available();
2504 RemoveCategory(category);
2505 category->Invalidate();
2506 });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002507 return sum;
2508}
2509
Ben Murdochda12d292016-06-02 14:46:10 +01002510bool FreeList::ContainsPageFreeListItems(Page* page) {
2511 bool contained = false;
2512 page->ForAllFreeListCategories(
2513 [this, &contained](FreeListCategory* category) {
2514 if (category->owner() == this && category->is_linked()) {
2515 contained = true;
2516 }
2517 });
2518 return contained;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002519}
2520
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002521void FreeList::RepairLists(Heap* heap) {
Ben Murdochda12d292016-06-02 14:46:10 +01002522 ForAllFreeListCategories(
2523 [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
2524}
2525
2526bool FreeList::AddCategory(FreeListCategory* category) {
2527 FreeListCategoryType type = category->type_;
2528 FreeListCategory* top = categories_[type];
2529
2530 if (category->is_empty()) return false;
2531 if (top == category) return false;
2532
2533 // Common double-linked list insertion.
2534 if (top != nullptr) {
2535 top->set_prev(category);
Ben Murdoch097c5b22016-05-18 11:27:45 +01002536 }
Ben Murdochda12d292016-06-02 14:46:10 +01002537 category->set_next(top);
2538 categories_[type] = category;
2539 return true;
2540}
2541
2542void FreeList::RemoveCategory(FreeListCategory* category) {
2543 FreeListCategoryType type = category->type_;
2544 FreeListCategory* top = categories_[type];
2545
2546 // Common double-linked list removal.
2547 if (top == category) {
2548 categories_[type] = category->next();
2549 }
2550 if (category->prev() != nullptr) {
2551 category->prev()->set_next(category->next());
2552 }
2553 if (category->next() != nullptr) {
2554 category->next()->set_prev(category->prev());
2555 }
2556 category->set_next(nullptr);
2557 category->set_prev(nullptr);
2558}
2559
2560void FreeList::PrintCategories(FreeListCategoryType type) {
2561 FreeListCategoryIterator it(this, type);
Ben Murdoch61f157c2016-09-16 13:49:30 +01002562 PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
2563 static_cast<void*>(categories_[type]), type);
Ben Murdochda12d292016-06-02 14:46:10 +01002564 while (it.HasNext()) {
2565 FreeListCategory* current = it.Next();
Ben Murdoch61f157c2016-09-16 13:49:30 +01002566 PrintF("%p -> ", static_cast<void*>(current));
Ben Murdochda12d292016-06-02 14:46:10 +01002567 }
2568 PrintF("null\n");
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002569}
2570
2571
2572#ifdef DEBUG
2573intptr_t FreeListCategory::SumFreeList() {
2574 intptr_t sum = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002575 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002576 while (cur != NULL) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002577 DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
2578 sum += cur->nobarrier_size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002579 cur = cur->next();
2580 }
2581 return sum;
2582}
2583
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002584int FreeListCategory::FreeListLength() {
2585 int length = 0;
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002586 FreeSpace* cur = top();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002587 while (cur != NULL) {
2588 length++;
2589 cur = cur->next();
2590 if (length == kVeryLongFreeList) return length;
2591 }
2592 return length;
2593}
2594
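// Returns true as soon as the combined length of all category lists reaches
// FreeListCategory::kVeryLongFreeList, so callers can avoid the linear
// SumFreeLists() walk below.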
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002595bool FreeList::IsVeryLong() {
Ben Murdochda12d292016-06-02 14:46:10 +01002596 int len = 0;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002597 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
Ben Murdochda12d292016-06-02 14:46:10 +01002598 FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
2599 while (it.HasNext()) {
2600 len += it.Next()->FreeListLength();
2601 if (len >= FreeListCategory::kVeryLongFreeList) return true;
Ben Murdoch097c5b22016-05-18 11:27:45 +01002602 }
2603 }
2604 return false;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002605}
2606
2607
2608// This can take a very long time because it is linear in the number of entries
2609// on the free list, so it should not be called if FreeListLength returns
2610// kVeryLongFreeList.
2611intptr_t FreeList::SumFreeLists() {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002612 intptr_t sum = 0;
Ben Murdochda12d292016-06-02 14:46:10 +01002613 ForAllFreeListCategories(
2614 [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002615 return sum;
2616}
2617#endif
2618
2619
2620// -----------------------------------------------------------------------------
2621// OldSpace implementation
2622
2623void PagedSpace::PrepareForMarkCompact() {
2624 // We don't have a linear allocation area while sweeping. It will be restored
2625 // on the first allocation after the sweep.
2626 EmptyAllocationInfo();
2627
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002628 // Clear the free list before a full GC---it will be rebuilt afterward.
2629 free_list_.Reset();
2630}
2631
2632
2633intptr_t PagedSpace::SizeOfObjects() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002634 const intptr_t size = Size() - (limit() - top());
2635 CHECK_GE(limit(), top());
2636 CHECK_GE(size, 0);
2637 USE(size);
2638 return size;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002639}
2640
2641
2642// After we have booted, we have created a map which represents free space
2643// on the heap. If there was already a free list then the elements on it
2644// were created with the wrong FreeSpaceMap (normally NULL), so we need to
2645// fix them.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002646void PagedSpace::RepairFreeListsAfterDeserialization() {
2647 free_list_.RepairLists(heap());
2648 // Each page may have a small free space that is not tracked by a free list.
2649 // Update the maps for those free space objects.
Ben Murdoch61f157c2016-09-16 13:49:30 +01002650 for (Page* page : *this) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002651 int size = static_cast<int>(page->wasted_memory());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002652 if (size == 0) continue;
2653 Address address = page->OffsetToAddress(Page::kPageSize - size);
Ben Murdochda12d292016-06-02 14:46:10 +01002654 heap()->CreateFillerObjectAt(address, size, ClearRecordedSlots::kNo);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002655 }
2656}
2657
2658
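// If the linear allocation area lies on a page that no longer allows
// allocation, fill the unused area with a filler object and drop it.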
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002659void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
2660 if (allocation_info_.top() >= allocation_info_.limit()) return;
2661
Ben Murdochc5610432016-08-08 18:44:38 +01002662 if (!Page::FromAllocationAreaAddress(allocation_info_.top())->CanAllocate()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002663 // Create filler object to keep page iterable if it was iterable.
2664 int remaining =
2665 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
Ben Murdochda12d292016-06-02 14:46:10 +01002666 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining,
2667 ClearRecordedSlots::kNo);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002668 allocation_info_.Reset(nullptr, nullptr);
2669 }
2670}
2671
2672
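// Slow path: if concurrent sweeping is still running, finish it on the main
// thread and retry the free-list allocation with the refilled lists.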
2673HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002674 MarkCompactCollector* collector = heap()->mark_compact_collector();
2675 if (collector->sweeping_in_progress()) {
2676 // Wait for the sweeper threads here and complete the sweeping phase.
2677 collector->EnsureSweepingCompleted();
2678
2679 // After waiting for the sweeper threads, there may be new free-list
2680 // entries.
2681 return free_list_.Allocate(size_in_bytes);
2682 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002683 return nullptr;
2684}
2685
2686
2687HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
2688 MarkCompactCollector* collector = heap()->mark_compact_collector();
2689 if (collector->sweeping_in_progress()) {
2690 collector->SweepAndRefill(this);
2691 return free_list_.Allocate(size_in_bytes);
2692 }
2693 return nullptr;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002694}
2695
2696
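// Slow allocation path: try to refill the free list from concurrent sweeping,
// sweep more pages on the main thread if needed, and only then either expand
// the space or wait for sweeping to finish before reporting failure.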
2697HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
Ben Murdoch097c5b22016-05-18 11:27:45 +01002698 const int kMaxPagesToSweep = 1;
2699
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002700 // Allocation in this space has failed.
2701
2702 MarkCompactCollector* collector = heap()->mark_compact_collector();
2703 // Sweeping is still in progress.
2704 if (collector->sweeping_in_progress()) {
2705 // First try to refill the free-list, concurrent sweeper threads
2706 // may have freed some objects in the meantime.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002707 RefillFreeList();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002708
2709 // Retry the free list allocation.
2710 HeapObject* object = free_list_.Allocate(size_in_bytes);
2711 if (object != NULL) return object;
2712
2713 // If sweeping is still in progress try to sweep pages on the main thread.
Ben Murdochc5610432016-08-08 18:44:38 +01002714 int max_freed = collector->sweeper().ParallelSweepSpace(
2715 identity(), size_in_bytes, kMaxPagesToSweep);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002716 RefillFreeList();
Ben Murdoch097c5b22016-05-18 11:27:45 +01002717 if (max_freed >= size_in_bytes) {
2718 object = free_list_.Allocate(size_in_bytes);
2719 if (object != nullptr) return object;
2720 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002721 }
2722
2723 // Free list allocation failed and there is no next page. Fail if we have
2724 // hit the old generation size limit that should cause a garbage
2725 // collection.
2726 if (!heap()->always_allocate() &&
2727 heap()->OldGenerationAllocationLimitReached()) {
2728 // If sweeper threads are active, wait for them at that point and steal
2729     // elements from their free-lists.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002730 HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
2731 return object;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002732 }
2733
2734 // Try to expand the space and allocate in the new next page.
2735 if (Expand()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002736 DCHECK((CountTotalPages() > 1) ||
2737 (size_in_bytes <= free_list_.Available()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002738 return free_list_.Allocate(size_in_bytes);
2739 }
2740
2741 // If sweeper threads are active, wait for them at that point and steal
2742   // elements from their free-lists. Allocation may still fail there, which
2743   // would indicate that there is not enough memory for the given allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002744 return SweepAndRetryAllocation(size_in_bytes);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002745}
2746
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002747#ifdef DEBUG
2748void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
2749 CommentStatistic* comments_statistics =
2750 isolate->paged_space_comments_statistics();
2751 ReportCodeKindStatistics(isolate->code_kind_statistics());
Ben Murdoch61f157c2016-09-16 13:49:30 +01002752 PrintF("Code size including metadata : %10d bytes\n",
2753 isolate->code_and_metadata_size());
2754 PrintF("Bytecode size including metadata: %10d bytes\n",
2755 isolate->bytecode_and_metadata_size());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002756 PrintF(
2757 "Code comment statistics (\" [ comment-txt : size/ "
2758 "count (average)\"):\n");
2759 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2760 const CommentStatistic& cs = comments_statistics[i];
2761 if (cs.size > 0) {
2762 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count,
2763 cs.size / cs.count);
2764 }
2765 }
2766 PrintF("\n");
2767}
2768
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002769void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
2770 CommentStatistic* comments_statistics =
2771 isolate->paged_space_comments_statistics();
2772 ClearCodeKindStatistics(isolate->code_kind_statistics());
2773 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2774 comments_statistics[i].Clear();
2775 }
2776 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
2777 comments_statistics[CommentStatistic::kMaxComments].size = 0;
2778 comments_statistics[CommentStatistic::kMaxComments].count = 0;
2779}
2780
2781
2782 // Adds a comment to the 'comments_statistics' table. Performance is OK as
2783 // long as 'kMaxComments' is small.
2784static void EnterComment(Isolate* isolate, const char* comment, int delta) {
2785 CommentStatistic* comments_statistics =
2786 isolate->paged_space_comments_statistics();
2787 // Do not count empty comments
2788 if (delta <= 0) return;
2789 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
2790 // Search for a free or matching entry in 'comments_statistics': 'cs'
2791 // points to result.
2792 for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
2793 if (comments_statistics[i].comment == NULL) {
2794 cs = &comments_statistics[i];
2795 cs->comment = comment;
2796 break;
2797 } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
2798 cs = &comments_statistics[i];
2799 break;
2800 }
2801 }
2802 // Update entry for 'comment'
2803 cs->size += delta;
2804 cs->count += 1;
2805}
2806
2807
2808 // Called for each nested comment start (start marked with '[ xxx', end marked
2809 // with ']'). RelocIterator 'it' must point to a comment reloc info.
2810static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
2811 DCHECK(!it->done());
2812 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
2813 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
2814 if (tmp[0] != '[') {
2815 // Not a nested comment; skip
2816 return;
2817 }
2818
2819 // Search for end of nested comment or a new nested comment
2820 const char* const comment_txt =
2821 reinterpret_cast<const char*>(it->rinfo()->data());
2822 const byte* prev_pc = it->rinfo()->pc();
2823 int flat_delta = 0;
2824 it->next();
2825 while (true) {
2826     // All nested comments must be terminated properly, so we will eventually
2827     // exit this loop.
2828 DCHECK(!it->done());
2829 if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
2830 const char* const txt =
2831 reinterpret_cast<const char*>(it->rinfo()->data());
2832 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
2833 if (txt[0] == ']') break; // End of nested comment
2834 // A new comment
2835 CollectCommentStatistics(isolate, it);
2836 // Skip code that was covered with previous comment
2837 prev_pc = it->rinfo()->pc();
2838 }
2839 it->next();
2840 }
2841 EnterComment(isolate, comment_txt, flat_delta);
2842}
2843
Ben Murdoch61f157c2016-09-16 13:49:30 +01002844// Collects code comment statistics
2845static void CollectCodeCommentStatistics(HeapObject* obj, Isolate* isolate) {
2846 if (!obj->IsCode()) {
2847 return;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002848 }
Ben Murdoch61f157c2016-09-16 13:49:30 +01002849 Code* code = Code::cast(obj);
2850 RelocIterator it(code);
2851 int delta = 0;
2852 const byte* prev_pc = code->instruction_start();
2853 while (!it.done()) {
2854 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2855 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2856 CollectCommentStatistics(isolate, &it);
2857 prev_pc = it.rinfo()->pc();
2858 }
2859 it.next();
2860 }
2861
2862 DCHECK(code->instruction_start() <= prev_pc &&
2863 prev_pc <= code->instruction_end());
2864 delta += static_cast<int>(code->instruction_end() - prev_pc);
2865 EnterComment(isolate, "NoComment", delta);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002866}
2867
2868
2869void PagedSpace::ReportStatistics() {
2870 int pct = static_cast<int>(Available() * 100 / Capacity());
Ben Murdochc5610432016-08-08 18:44:38 +01002871 PrintF(" capacity: %" V8PRIdPTR ", waste: %" V8PRIdPTR
2872 ", available: %" V8PRIdPTR ", %%%d\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002873 Capacity(), Waste(), Available(), pct);
2874
2875 if (heap()->mark_compact_collector()->sweeping_in_progress()) {
2876 heap()->mark_compact_collector()->EnsureSweepingCompleted();
2877 }
2878 ClearHistograms(heap()->isolate());
2879 HeapObjectIterator obj_it(this);
2880 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2881 CollectHistogramInfo(obj);
2882 ReportHistogram(heap()->isolate(), true);
2883}
2884#endif
2885
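// Adds |abstract_code|'s size including metadata to the isolate-wide code or
// bytecode counter, depending on the kind of code object.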
Ben Murdoch61f157c2016-09-16 13:49:30 +01002886static void RecordCodeSizeIncludingMetadata(AbstractCode* abstract_code,
2887 Isolate* isolate) {
2888 int size = abstract_code->SizeIncludingMetadata();
2889 if (abstract_code->IsCode()) {
2890 size += isolate->code_and_metadata_size();
2891 isolate->set_code_and_metadata_size(size);
2892 } else {
2893 size += isolate->bytecode_and_metadata_size();
2894 isolate->set_bytecode_and_metadata_size(size);
2895 }
2896}
2897
2898// Collects code size statistics:
2899// - code and metadata size
2900// - by code kind (only in debug mode)
2901// - by code comment (only in debug mode)
2902void PagedSpace::CollectCodeStatistics() {
2903 Isolate* isolate = heap()->isolate();
2904 HeapObjectIterator obj_it(this);
2905 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2906 if (obj->IsAbstractCode()) {
2907 AbstractCode* code = AbstractCode::cast(obj);
2908 RecordCodeSizeIncludingMetadata(code, isolate);
2909#ifdef DEBUG
2910 isolate->code_kind_statistics()[code->kind()] += code->Size();
2911 CollectCodeCommentStatistics(obj, isolate);
2912#endif
2913 }
2914 }
2915}
2916
2917void PagedSpace::ResetCodeAndMetadataStatistics(Isolate* isolate) {
2918 isolate->set_code_and_metadata_size(0);
2919 isolate->set_bytecode_and_metadata_size(0);
2920#ifdef DEBUG
2921 ResetCodeStatistics(isolate);
2922#endif
2923}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002924
2925// -----------------------------------------------------------------------------
2926// MapSpace implementation
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002927
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002928#ifdef VERIFY_HEAP
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002929void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002930#endif
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002931
2932
2933// -----------------------------------------------------------------------------
2934// LargeObjectIterator
2935
2936LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2937 current_ = space->first_page_;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002938}
2939
2940
2941HeapObject* LargeObjectIterator::Next() {
2942 if (current_ == NULL) return NULL;
2943
2944 HeapObject* object = current_->GetObject();
2945 current_ = current_->next_page();
2946 return object;
2947}
2948
2949
2950// -----------------------------------------------------------------------------
2951// LargeObjectSpace
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002952
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002953LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002954 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002955 first_page_(NULL),
2956 size_(0),
2957 page_count_(0),
2958 objects_size_(0),
Ben Murdoch61f157c2016-09-16 13:49:30 +01002959 chunk_map_(base::HashMap::PointersMatch, 1024) {}
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002960
2961LargeObjectSpace::~LargeObjectSpace() {}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002962
2963
2964bool LargeObjectSpace::SetUp() {
2965 first_page_ = NULL;
2966 size_ = 0;
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002967 page_count_ = 0;
2968 objects_size_ = 0;
2969 chunk_map_.Clear();
2970 return true;
2971}
2972
2973
2974void LargeObjectSpace::TearDown() {
2975 while (first_page_ != NULL) {
2976 LargePage* page = first_page_;
2977 first_page_ = first_page_->next_page();
2978 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
Ben Murdochc5610432016-08-08 18:44:38 +01002979 heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002980 }
2981 SetUp();
2982}
2983
2984
2985AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
2986 Executability executable) {
2987 // Check if we want to force a GC before growing the old space further.
2988 // If so, fail the allocation.
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002989 if (!heap()->CanExpandOldGeneration(object_size)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002990 return AllocationResult::Retry(identity());
2991 }
2992
Ben Murdochc5610432016-08-08 18:44:38 +01002993 LargePage* page = heap()->memory_allocator()->AllocateLargePage(
Ben Murdochb8a8cc12014-11-26 15:28:44 +00002994 object_size, this, executable);
2995 if (page == NULL) return AllocationResult::Retry(identity());
2996 DCHECK(page->area_size() >= object_size);
2997
2998 size_ += static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00002999 AccountCommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003000 objects_size_ += object_size;
3001 page_count_++;
3002 page->set_next_page(first_page_);
3003 first_page_ = page;
3004
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003005 // Register all MemoryChunk::kAlignment-aligned chunks covered by
3006 // this large page in the chunk map.
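  // For example, assuming (hypothetically) kAlignment == 0x80000, a page at
  // 0x40000000 with size 0x180000 is registered under keys 0x800..0x802, so
  // FindPage() can resolve any interior address with a single hash lookup.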
3007 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
3008 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
3009 for (uintptr_t key = base; key <= limit; key++) {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003010 base::HashMap::Entry* entry = chunk_map_.LookupOrInsert(
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003011 reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003012 DCHECK(entry != NULL);
3013 entry->value = page;
3014 }
3015
3016 HeapObject* object = page->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003017 MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
3018
3019 if (Heap::ShouldZapGarbage()) {
3020 // Make the object consistent so the heap can be verified in OldSpaceStep.
3021 // We only need to do this in debug builds or if verify_heap is on.
3022 reinterpret_cast<Object**>(object->address())[0] =
3023 heap()->fixed_array_map();
3024 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
3025 }
3026
3027 heap()->incremental_marking()->OldSpaceStep(object_size);
Ben Murdoch097c5b22016-05-18 11:27:45 +01003028 AllocationStep(object->address(), object_size);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003029 return object;
3030}
3031
3032
3033size_t LargeObjectSpace::CommittedPhysicalMemory() {
Ben Murdoch61f157c2016-09-16 13:49:30 +01003034 // On a platform that provides lazy committing of memory, we over-account
3035 // the actually committed memory. There is no easy way right now to support
3036 // precise accounting of committed memory in large object space.
3037 return CommittedMemory();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003038}
3039
3040
3041// GC support
3042Object* LargeObjectSpace::FindObject(Address a) {
3043 LargePage* page = FindPage(a);
3044 if (page != NULL) {
3045 return page->GetObject();
3046 }
3047 return Smi::FromInt(0); // Signaling not found.
3048}
3049
3050
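// Maps an address to the LargePage containing it, if any, by looking up the
// kAlignment-granular key in |chunk_map_|; returns NULL otherwise.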
3051LargePage* LargeObjectSpace::FindPage(Address a) {
3052 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
Ben Murdoch61f157c2016-09-16 13:49:30 +01003053 base::HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
3054 static_cast<uint32_t>(key));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003055 if (e != NULL) {
3056 DCHECK(e->value != NULL);
3057 LargePage* page = reinterpret_cast<LargePage*>(e->value);
Ben Murdochc5610432016-08-08 18:44:38 +01003058 DCHECK(LargePage::IsValid(page));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003059 if (page->Contains(a)) {
3060 return page;
3061 }
3062 }
3063 return NULL;
3064}
3065
3066
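// Turns every (black) live large object white again and resets the progress
// bar and live-byte count of its page.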
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003067void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
3068 LargePage* current = first_page_;
3069 while (current != NULL) {
3070 HeapObject* object = current->GetObject();
3071 MarkBit mark_bit = Marking::MarkBitFrom(object);
3072 DCHECK(Marking::IsBlack(mark_bit));
3073 Marking::BlackToWhite(mark_bit);
3074 Page::FromAddress(object->address())->ResetProgressBar();
3075 Page::FromAddress(object->address())->ResetLiveBytes();
3076 current = current->next_page();
3077 }
3078}
3079
3080
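// Releases all large pages whose object is not marked black: unlinks the page
// from the page list, drops its chunk-map entries, and hands it back to the
// memory allocator.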
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003081void LargeObjectSpace::FreeUnmarkedObjects() {
3082 LargePage* previous = NULL;
3083 LargePage* current = first_page_;
3084 while (current != NULL) {
3085 HeapObject* object = current->GetObject();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003086 MarkBit mark_bit = Marking::MarkBitFrom(object);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003087 DCHECK(!Marking::IsGrey(mark_bit));
3088 if (Marking::IsBlack(mark_bit)) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003089 previous = current;
3090 current = current->next_page();
3091 } else {
3092 LargePage* page = current;
3093 // Cut the chunk out from the chunk list.
3094 current = current->next_page();
3095 if (previous == NULL) {
3096 first_page_ = current;
3097 } else {
3098 previous->set_next_page(current);
3099 }
3100
3101 // Free the chunk.
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003102 size_ -= static_cast<int>(page->size());
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003103 AccountUncommitted(static_cast<intptr_t>(page->size()));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003104 objects_size_ -= object->Size();
3105 page_count_--;
3106
3107 // Remove entries belonging to this page.
3108       // Use the local variable |alignment| to keep the lines below within the
3109       // 80-character line-length limit checked by tools/presubmit.py.
3110 const intptr_t alignment = MemoryChunk::kAlignment;
3111 uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
3112 uintptr_t limit = base + (page->size() - 1) / alignment;
3113 for (uintptr_t key = base; key <= limit; key++) {
3114 chunk_map_.Remove(reinterpret_cast<void*>(key),
3115 static_cast<uint32_t>(key));
3116 }
3117
Ben Murdochc5610432016-08-08 18:44:38 +01003118 heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003119 }
3120 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003121}
3122
3123
3124bool LargeObjectSpace::Contains(HeapObject* object) {
3125 Address address = object->address();
3126 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
3127
3128 bool owned = (chunk->owner() == this);
3129
3130 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
3131
3132 return owned;
3133}
3134
3135
3136#ifdef VERIFY_HEAP
3137// We do not assume that the large object iterator works, because it depends
3138// on the invariants we are checking during verification.
3139void LargeObjectSpace::Verify() {
3140 for (LargePage* chunk = first_page_; chunk != NULL;
3141 chunk = chunk->next_page()) {
3142 // Each chunk contains an object that starts at the large object page's
3143 // object area start.
3144 HeapObject* object = chunk->GetObject();
3145 Page* page = Page::FromAddress(object->address());
3146 CHECK(object->address() == page->area_start());
3147
3148 // The first word should be a map, and we expect all map pointers to be
3149 // in map space.
3150 Map* map = object->map();
3151 CHECK(map->IsMap());
3152 CHECK(heap()->map_space()->Contains(map));
3153
3154 // We have only code, sequential strings, external strings
3155 // (sequential strings that have been morphed into external
3156     // strings), fixed arrays, fixed double arrays, and byte arrays in the
3157 // large object space.
Ben Murdochda12d292016-06-02 14:46:10 +01003158 CHECK(object->IsAbstractCode() || object->IsSeqString() ||
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003159 object->IsExternalString() || object->IsFixedArray() ||
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003160 object->IsFixedDoubleArray() || object->IsByteArray());
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003161
3162 // The object itself should look OK.
3163 object->ObjectVerify();
3164
3165 // Byte arrays and strings don't have interior pointers.
Ben Murdochda12d292016-06-02 14:46:10 +01003166 if (object->IsAbstractCode()) {
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003167 VerifyPointersVisitor code_visitor;
3168 object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
3169 } else if (object->IsFixedArray()) {
3170 FixedArray* array = FixedArray::cast(object);
3171 for (int j = 0; j < array->length(); j++) {
3172 Object* element = array->get(j);
3173 if (element->IsHeapObject()) {
3174 HeapObject* element_object = HeapObject::cast(element);
3175 CHECK(heap()->Contains(element_object));
3176 CHECK(element_object->map()->IsMap());
3177 }
3178 }
3179 }
3180 }
3181}
3182#endif
3183
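// Same statistics collection as PagedSpace::CollectCodeStatistics(), but for
// code objects living in the large object space.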
Ben Murdoch61f157c2016-09-16 13:49:30 +01003184void LargeObjectSpace::CollectCodeStatistics() {
3185 Isolate* isolate = heap()->isolate();
3186 LargeObjectIterator obj_it(this);
3187 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3188 if (obj->IsAbstractCode()) {
3189 AbstractCode* code = AbstractCode::cast(obj);
3190 RecordCodeSizeIncludingMetadata(code, isolate);
3191#ifdef DEBUG
3192 isolate->code_kind_statistics()[code->kind()] += code->Size();
3193 CollectCodeCommentStatistics(obj, isolate);
3194#endif
3195 }
3196 }
3197}
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003198
3199#ifdef DEBUG
3200void LargeObjectSpace::Print() {
3201 OFStream os(stdout);
3202 LargeObjectIterator it(this);
3203 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3204 obj->Print(os);
3205 }
3206}
3207
3208
3209void LargeObjectSpace::ReportStatistics() {
Ben Murdochc5610432016-08-08 18:44:38 +01003210 PrintF(" size: %" V8PRIdPTR "\n", size_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003211 int num_objects = 0;
3212 ClearHistograms(heap()->isolate());
3213 LargeObjectIterator it(this);
3214 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3215 num_objects++;
3216 CollectHistogramInfo(obj);
3217 }
3218
3219 PrintF(
3220 " number of objects %d, "
Ben Murdochc5610432016-08-08 18:44:38 +01003221 "size of objects %" V8PRIdPTR "\n",
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003222 num_objects, objects_size_);
3223 if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
3224}
3225
3226
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003227void Page::Print() {
3228 // Make a best-effort to print the objects in the page.
Ben Murdoch61f157c2016-09-16 13:49:30 +01003229 PrintF("Page@%p in %s\n", static_cast<void*>(this->address()),
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003230 AllocationSpaceName(this->owner()->identity()));
3231 printf(" --------------------------------------\n");
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003232 HeapObjectIterator objects(this);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003233 unsigned mark_size = 0;
3234 for (HeapObject* object = objects.Next(); object != NULL;
3235 object = objects.Next()) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003236 bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003237 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
3238 if (is_marked) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003239 mark_size += object->Size();
Ben Murdochb8a8cc12014-11-26 15:28:44 +00003240 }
3241 object->ShortPrint();
3242 PrintF("\n");
3243 }
3244 printf(" --------------------------------------\n");
3245 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3246}
3247
3248#endif // DEBUG
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00003249} // namespace internal
3250} // namespace v8