Upgrade V8 to version 4.9.385.28
https://chromium.googlesource.com/v8/v8/+/4.9.385.28
FPIIM-449
Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 060052e..90d252a 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -2,14 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/v8.h"
+#include "src/heap/spaces.h"
#include "src/base/bits.h"
#include "src/base/platform/platform.h"
-#include "src/full-codegen.h"
-#include "src/heap/mark-compact.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/heap/slots-buffer.h"
#include "src/macro-assembler.h"
#include "src/msan.h"
+#include "src/snapshot/snapshot.h"
namespace v8 {
namespace internal {
@@ -23,43 +24,27 @@
// just an anchor for the double linked page list. Initialize as if we have
// reached the end of the anchor page, then the first iteration will move on
// to the first page.
- Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
+ Initialize(space, NULL, NULL, kAllPagesInSpace);
}
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
- HeapObjectCallback size_func) {
- // You can't actually iterate over the anchor page. It is not a real page,
- // just an anchor for the double linked page list. Initialize the current
- // address and end as NULL, then the first iteration will move on
- // to the first page.
- Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
- HeapObjectCallback size_func) {
+HeapObjectIterator::HeapObjectIterator(Page* page) {
Space* owner = page->owner();
- DCHECK(owner == page->heap()->old_pointer_space() ||
- owner == page->heap()->old_data_space() ||
+ DCHECK(owner == page->heap()->old_space() ||
owner == page->heap()->map_space() ||
- owner == page->heap()->cell_space() ||
- owner == page->heap()->property_cell_space() ||
owner == page->heap()->code_space());
Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
- page->area_end(), kOnePageOnly, size_func);
+ page->area_end(), kOnePageOnly);
DCHECK(page->WasSwept() || page->SweepingCompleted());
}
void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
- HeapObjectIterator::PageMode mode,
- HeapObjectCallback size_f) {
+ HeapObjectIterator::PageMode mode) {
space_ = space;
cur_addr_ = cur;
cur_end_ = end;
page_mode_ = mode;
- size_func_ = size_f;
}
@@ -77,6 +62,8 @@
}
cur_page = cur_page->next_page();
if (cur_page == space_->anchor()) return false;
+ cur_page->heap()->mark_compact_collector()->SweepOrWaitUntilSweepingCompleted(
+ cur_page);
cur_addr_ = cur_page->area_start();
cur_end_ = cur_page->area_end();
DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
@@ -93,8 +80,7 @@
code_range_(NULL),
free_list_(0),
allocation_list_(0),
- current_allocation_block_index_(0),
- emergency_block_() {}
+ current_allocation_block_index_(0) {}
bool CodeRange::SetUp(size_t requested) {
@@ -116,7 +102,14 @@
}
DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+#ifdef V8_TARGET_ARCH_MIPS64
+ // To use pseudo-relative jumps such as the j/jal instructions, which have a
+ // 28-bit encoded immediate, the addresses have to lie within a single
+ // 256MB-aligned region.
+ code_range_ = new base::VirtualMemory(requested, kMaximalCodeRangeSize);
+#else
code_range_ = new base::VirtualMemory(requested);
+#endif
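  // Why 256MB: j/jal encode a 26-bit instruction index, and instructions are
  // 4-byte aligned, so a jump reaches 2^(26+2) = 2^28 bytes = 256MB, and only
  // within the 256MB-aligned window containing the jump itself. Keeping the
  // whole code range inside one such window (assuming kMaximalCodeRangeSize
  // is at most 256MB on this target) makes every intra-range jump encodable.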
CHECK(code_range_ != NULL);
if (!code_range_->IsReserved()) {
delete code_range_;
@@ -146,7 +139,6 @@
current_allocation_block_index_ = 0;
LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
- ReserveEmergencyBlock();
return true;
}
@@ -204,7 +196,10 @@
Address CodeRange::AllocateRawMemory(const size_t requested_size,
const size_t commit_size,
size_t* allocated) {
- DCHECK(commit_size <= requested_size);
+ // requested_size includes the guard pages while commit_size does not. Make
+ // sure callers know about this invariant.
+ CHECK_LE(commit_size,
+ requested_size - 2 * MemoryAllocator::CodePageGuardSize());
FreeBlock current;
if (!ReserveBlock(requested_size, &current)) {
*allocated = 0;
@@ -235,6 +230,7 @@
void CodeRange::FreeRawMemory(Address address, size_t length) {
DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Add(FreeBlock(address, length));
code_range_->Uncommit(address, length);
}
@@ -243,12 +239,14 @@
void CodeRange::TearDown() {
delete code_range_; // Frees all memory in the virtual memory range.
code_range_ = NULL;
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
free_list_.Free();
allocation_list_.Free();
}
bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
DCHECK(allocation_list_.length() == 0 ||
current_allocation_block_index_ < allocation_list_.length());
if (allocation_list_.length() == 0 ||
@@ -270,24 +268,9 @@
}
-void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
-
-
-void CodeRange::ReserveEmergencyBlock() {
- const size_t requested_size = MemoryAllocator::CodePageAreaSize();
- if (emergency_block_.size == 0) {
- ReserveBlock(requested_size, &emergency_block_);
- } else {
- DCHECK(emergency_block_.size >= requested_size);
- }
-}
-
-
-void CodeRange::ReleaseEmergencyBlock() {
- if (emergency_block_.size != 0) {
- ReleaseBlock(&emergency_block_);
- emergency_block_.size = 0;
- }
+void CodeRange::ReleaseBlock(const FreeBlock* block) {
+ base::LockGuard<base::Mutex> guard(&code_range_mutex_);
+ free_list_.Add(*block);
}
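
// A minimal, standalone sketch of the locking pattern the CodeRange hunks
// above introduce, using std::mutex/std::lock_guard as stand-ins for
// base::Mutex/base::LockGuard (assumed analogous); FreeBlockSketch and
// CodeRangeSketch are made-up illustration types, not V8's classes.
#include <cstddef>
#include <mutex>
#include <vector>

struct FreeBlockSketch {
  void* address;
  size_t length;
};

class CodeRangeSketch {
 public:
  void FreeRawMemory(void* address, size_t length) {
    // The guard releases the mutex on every return path (RAII), so a
    // concurrent ReserveBlock/ReleaseBlock never sees a half-updated list.
    std::lock_guard<std::mutex> guard(mutex_);
    free_list_.push_back(FreeBlockSketch{address, length});
  }

 private:
  std::mutex mutex_;
  std::vector<FreeBlockSketch> free_list_;
};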
@@ -319,7 +302,7 @@
void MemoryAllocator::TearDown() {
// Check that spaces were torn down before MemoryAllocator.
- DCHECK(size_ == 0);
+ DCHECK(size_.Value() == 0);
// TODO(gc) this will be true again when we fix FreeMemory.
// DCHECK(size_executable_ == 0);
capacity_ = 0;
@@ -338,26 +321,31 @@
}
+void MemoryAllocator::FreeNewSpaceMemory(Address addr,
+ base::VirtualMemory* reservation,
+ Executability executable) {
+ LOG(isolate_, DeleteEvent("NewSpace", addr));
+
+ DCHECK(reservation->IsReserved());
+ const intptr_t size = static_cast<intptr_t>(reservation->size());
+ DCHECK(size_.Value() >= size);
+ size_.Increment(-size);
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+ FreeMemory(reservation, NOT_EXECUTABLE);
+}
+
+
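// The size_/size_executable_ counters move from plain intptr_t arithmetic to
// an atomic Value()/Increment() interface in these hunks. Below is a minimal
// stand-in for what base::AtomicNumber is assumed to provide (a sketch, not
// V8's actual implementation):

#include <atomic>
#include <cstdint>

template <typename T>
class AtomicNumberSketch {
 public:
  explicit AtomicNumberSketch(T initial = T()) : value_(initial) {}
  T Value() const { return value_.load(std::memory_order_relaxed); }
  // Decrements are expressed as Increment(-delta), as in FreeNewSpaceMemory.
  void Increment(T by) { value_.fetch_add(by, std::memory_order_relaxed); }

 private:
  std::atomic<T> value_;
};
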
void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- DCHECK(reservation->IsReserved());
- size_t size = reservation->size();
- DCHECK(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- DCHECK(size_executable_ >= size);
- size_executable_ -= size;
- }
// Code which is part of the code-range does not have its own VirtualMemory.
DCHECK(isolate_->code_range() == NULL ||
!isolate_->code_range()->contains(
static_cast<Address>(reservation->address())));
DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
- !isolate_->code_range()->valid());
+ !isolate_->code_range()->valid() ||
+ reservation->size() <= Page::kPageSize);
+
reservation->Release();
}
@@ -365,15 +353,6 @@
void MemoryAllocator::FreeMemory(Address base, size_t size,
Executability executable) {
// TODO(gc) make code_range part of memory allocator?
- DCHECK(size_ >= size);
- size_ -= size;
-
- isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
- if (executable == EXECUTABLE) {
- DCHECK(size_executable_ >= size);
- size_executable_ -= size;
- }
if (isolate_->code_range() != NULL &&
isolate_->code_range()->contains(static_cast<Address>(base))) {
DCHECK(executable == EXECUTABLE);
@@ -393,7 +372,7 @@
base::VirtualMemory reservation(size, alignment);
if (!reservation.IsReserved()) return NULL;
- size_ += reservation.size();
+ size_.Increment(static_cast<intptr_t>(reservation.size()));
Address base =
RoundUp(static_cast<Address>(reservation.address()), alignment);
controller->TakeControl(&reservation);
@@ -449,8 +428,6 @@
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
area_end, NOT_EXECUTABLE, semi_space);
- chunk->set_next_chunk(NULL);
- chunk->set_prev_chunk(NULL);
chunk->initialize_scan_on_scavenge(true);
bool in_to_space = (semi_space->id() != kFromSpace);
chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
@@ -491,8 +468,10 @@
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<int>(area_start - base);
- chunk->set_parallel_sweeping(SWEEPING_DONE);
+ chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+ chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+ chunk->parallel_compaction_state().SetValue(kCompactingDone);
+ chunk->mutex_ = NULL;
chunk->available_in_small_free_list_ = 0;
chunk->available_in_medium_free_list_ = 0;
chunk->available_in_large_free_list_ = 0;
@@ -502,6 +481,8 @@
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
chunk->SetFlag(WAS_SWEPT);
+ chunk->set_next_chunk(nullptr);
+ chunk->set_prev_chunk(nullptr);
DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
@@ -510,10 +491,6 @@
chunk->SetFlag(IS_EXECUTABLE);
}
- if (owner == heap->old_data_space()) {
- chunk->SetFlag(CONTAINS_ONLY_DATA);
- }
-
return chunk;
}
@@ -640,7 +617,8 @@
CodePageGuardSize();
// Check executable memory limit.
- if (size_executable_ + chunk_size > capacity_executable_) {
+ if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
+ capacity_executable_) {
LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
"V8 Executable Allocation capacity exceeded"));
return NULL;
@@ -651,22 +629,29 @@
base::OS::CommitPageSize());
// Allocate executable memory either from code range or from the
// OS.
+#ifdef V8_TARGET_ARCH_MIPS64
+ // On mips64, use the code range only for the large object space, to keep
+ // the address range within a 256-MB memory region.
+ if (isolate_->code_range() != NULL && isolate_->code_range()->valid() &&
+ reserve_area_size > CodePageAreaSize()) {
+#else
if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+#endif
base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
&chunk_size);
DCHECK(
IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
if (base == NULL) return NULL;
- size_ += chunk_size;
+ size_.Increment(static_cast<intptr_t>(chunk_size));
// Update executable memory size.
- size_executable_ += chunk_size;
+ size_executable_.Increment(static_cast<intptr_t>(chunk_size));
} else {
base = AllocateAlignedMemory(chunk_size, commit_size,
MemoryChunk::kAlignment, executable,
&reservation);
if (base == NULL) return NULL;
// Update executable memory size.
- size_executable_ += reservation.size();
+ size_executable_.Increment(static_cast<intptr_t>(reservation.size()));
}
if (Heap::ShouldZapGarbage()) {
@@ -726,9 +711,7 @@
Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-
if (chunk == NULL) return NULL;
-
return Page::Initialize(isolate_->heap(), chunk, executable, owner);
}
@@ -743,7 +726,8 @@
}
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
+ DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
if (chunk->owner() != NULL) {
ObjectSpace space =
@@ -754,8 +738,29 @@
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
- delete chunk->slots_buffer();
- delete chunk->skip_list();
+ intptr_t size;
+ base::VirtualMemory* reservation = chunk->reserved_memory();
+ if (reservation->IsReserved()) {
+ size = static_cast<intptr_t>(reservation->size());
+ } else {
+ size = static_cast<intptr_t>(chunk->size());
+ }
+ DCHECK(size_.Value() >= size);
+ size_.Increment(-size);
+ isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+ if (chunk->executable() == EXECUTABLE) {
+ DCHECK(size_executable_.Value() >= size);
+ size_executable_.Increment(-size);
+ }
+
+ chunk->SetFlag(MemoryChunk::PRE_FREED);
+}
+
+
+void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+ DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
+ chunk->ReleaseAllocatedMemory();
base::VirtualMemory* reservation = chunk->reserved_memory();
if (reservation->IsReserved()) {
@@ -766,6 +771,12 @@
}
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+ PreFreeMemory(chunk);
+ PerformFreeMemory(chunk);
+}
+
+
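// A reduced model of the PreFree/PerformFree split introduced above, with
// made-up types (not V8's MemoryChunk/MemoryAllocator): bookkeeping runs
// eagerly and exactly once, while the expensive release can be deferred,
// e.g. handed to an unmapping queue. The PRE_FREED flag enforces the order.

#include <cassert>
#include <cstddef>

struct ChunkSketch {
  size_t size = 0;
  bool pre_freed = false;
};

class AllocatorSketch {
 public:
  void PreFree(ChunkSketch* chunk) {
    assert(!chunk->pre_freed);
    allocated_ -= chunk->size;  // cheap accounting, done up front
    chunk->pre_freed = true;    // models MemoryChunk::PRE_FREED
  }
  void PerformFree(ChunkSketch* chunk) {
    assert(chunk->pre_freed);  // protocol: PreFree must have run first
    delete chunk;              // the expensive part; may run later/elsewhere
  }
  void Free(ChunkSketch* chunk) {  // the synchronous path = both phases
    PreFree(chunk);
    PerformFree(chunk);
  }

 private:
  size_t allocated_ = 0;
};
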
bool MemoryAllocator::CommitBlock(Address start, size_t size,
Executability executable) {
if (!CommitMemory(start, size, executable)) return false;
@@ -840,13 +851,14 @@
#ifdef DEBUG
void MemoryAllocator::ReportStatistics() {
- float pct = static_cast<float>(capacity_ - size_) / capacity_;
+ intptr_t size = Size();
+ float pct = static_cast<float>(capacity_ - size) / capacity_;
PrintF(" capacity: %" V8_PTR_PREFIX
"d"
", used: %" V8_PTR_PREFIX
"d"
", available: %%%d\n\n",
- capacity_, size_, static_cast<int>(pct * 100));
+ capacity_, size, static_cast<int>(pct * 100));
}
#endif
@@ -881,80 +893,72 @@
Address start, size_t commit_size,
size_t reserved_size) {
// Commit page header (not executable).
- if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
- return false;
+ Address header = start;
+ size_t header_size = CodePageGuardStartOffset();
+ if (vm->Commit(header, header_size, false)) {
+ // Create guard page after the header.
+ if (vm->Guard(start + CodePageGuardStartOffset())) {
+ // Commit page body (executable).
+ Address body = start + CodePageAreaStartOffset();
+ size_t body_size = commit_size - CodePageGuardStartOffset();
+ if (vm->Commit(body, body_size, true)) {
+ // Create guard page before the end.
+ if (vm->Guard(start + reserved_size - CodePageGuardSize())) {
+ UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+ commit_size -
+ CodePageGuardStartOffset());
+ return true;
+ }
+ vm->Uncommit(body, body_size);
+ }
+ }
+ vm->Uncommit(header, header_size);
}
-
- // Create guard page after the header.
- if (!vm->Guard(start + CodePageGuardStartOffset())) {
- return false;
- }
-
- // Commit page body (executable).
- if (!vm->Commit(start + CodePageAreaStartOffset(),
- commit_size - CodePageGuardStartOffset(), true)) {
- return false;
- }
-
- // Create guard page before the end.
- if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
- return false;
- }
-
- UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
- commit_size -
- CodePageGuardStartOffset());
- return true;
+ return false;
}
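
  // Layout the nested commits above establish (offsets illustrative):
  //
  //   start                                                    reserved end
  //   +--------------+------------+----------------------+------------+
  //   | header (R/W) | guard page | body (R/W, exec)     | guard page |
  //   +--------------+------------+----------------------+------------+
  //                  ^ start + CodePageGuardStartOffset()
  //                               ^ start + CodePageAreaStartOffset()
  //
  // Each successful Commit/Guard step nests one level deeper; on any failure
  // the already-committed pieces are uncommitted on the way out, so a code
  // page is never left partially initialized.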
// -----------------------------------------------------------------------------
// MemoryChunk implementation
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
- static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+ static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
}
chunk->IncrementLiveBytes(by);
}
+void MemoryChunk::ReleaseAllocatedMemory() {
+ delete slots_buffer_;
+ delete skip_list_;
+ delete mutex_;
+}
+
+
// -----------------------------------------------------------------------------
// PagedSpace implementation
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
ObjectSpace::kObjectSpaceNewSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1
- << AllocationSpace::OLD_POINTER_SPACE) ==
- ObjectSpace::kObjectSpaceOldPointerSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
- ObjectSpace::kObjectSpaceOldDataSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_SPACE) ==
+ ObjectSpace::kObjectSpaceOldSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
ObjectSpace::kObjectSpaceCodeSpace);
-STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
- ObjectSpace::kObjectSpaceCellSpace);
-STATIC_ASSERT(
- static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
- ObjectSpace::kObjectSpacePropertyCellSpace);
STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
ObjectSpace::kObjectSpaceMapSpace);
-PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
+PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
Executability executable)
: Space(heap, space, executable),
free_list_(this),
- unswept_free_bytes_(0),
- end_of_unswept_pages_(NULL),
- emergency_memory_(NULL) {
+ end_of_unswept_pages_(NULL) {
area_size_ = MemoryAllocator::PageAreaSize(space);
- max_capacity_ =
- (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
accounting_stats_.Clear();
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
anchor_.InitializeAsAnchor(this);
}
@@ -977,6 +981,150 @@
}
+void PagedSpace::AddMemory(Address start, intptr_t size) {
+ accounting_stats_.ExpandSpace(static_cast<int>(size));
+ Free(start, static_cast<int>(size));
+}
+
+
+FreeSpace* PagedSpace::TryRemoveMemory(intptr_t size_in_bytes) {
+ FreeSpace* free_space = free_list()->TryRemoveMemory(size_in_bytes);
+ if (free_space != nullptr) {
+ accounting_stats_.DecreaseCapacity(free_space->size());
+ }
+ return free_space;
+}
+
+
+void PagedSpace::DivideUponCompactionSpaces(CompactionSpaceCollection** other,
+ int num, intptr_t limit) {
+ DCHECK_GT(num, 0);
+ DCHECK(other != nullptr);
+
+ if (limit == 0) limit = std::numeric_limits<intptr_t>::max();
+
+ EmptyAllocationInfo();
+
+ bool memory_available = true;
+ bool spaces_need_memory = true;
+ FreeSpace* node = nullptr;
+ CompactionSpace* current_space = nullptr;
+ // Iterate over spaces and memory as long as we have memory and there are
+ // spaces in need of some.
+ while (memory_available && spaces_need_memory) {
+ spaces_need_memory = false;
+ // Round-robin over all spaces.
+ for (int i = 0; i < num; i++) {
+ current_space = other[i]->Get(identity());
+ if (current_space->free_list()->Available() < limit) {
+ // Space has not reached its limit. Try to get some memory.
+ spaces_need_memory = true;
+ node = TryRemoveMemory(limit - current_space->free_list()->Available());
+ if (node != nullptr) {
+ CHECK(current_space->identity() == identity());
+ current_space->AddMemory(node->address(), node->size());
+ } else {
+ memory_available = false;
+ break;
+ }
+ }
+ }
+ }
+}
+
+
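// A simplified model of the round-robin hand-out in DivideUponCompactionSpaces
// above, with plain byte counters instead of FreeSpace nodes (all names here
// are made up for illustration):

#include <vector>

// Tops up each consumer to `limit` units, cycling over all of them until
// either every consumer is satisfied or the donor runs out of memory.
inline void DivideRoundRobin(long* donor, std::vector<long>* consumers,
                             long limit) {
  bool memory_available = true;
  bool need_more = true;
  while (memory_available && need_more) {
    need_more = false;
    for (long& c : *consumers) {
      if (c < limit) {
        need_more = true;
        long want = limit - c;
        long got = (*donor >= want) ? want : *donor;  // may cover only part
        *donor -= got;
        c += got;
        if (*donor == 0) {
          memory_available = false;
          break;
        }
      }
    }
  }
}
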
+void PagedSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (this == heap()->old_space()) {
+ free_list = collector->free_list_old_space().get();
+ } else if (this == heap()->code_space()) {
+ free_list = collector->free_list_code_space().get();
+ } else if (this == heap()->map_space()) {
+ free_list = collector->free_list_map_space().get();
+ } else {
+ // Any PagedSpace might invoke RefillFreeList; we filter out everything but
+ // our old-generation spaces.
+ return;
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t added = free_list_.Concatenate(free_list);
+ accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void CompactionSpace::RefillFreeList() {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ FreeList* free_list = nullptr;
+ if (identity() == OLD_SPACE) {
+ free_list = collector->free_list_old_space().get();
+ } else if (identity() == CODE_SPACE) {
+ free_list = collector->free_list_code_space().get();
+ } else {
+ // Compaction spaces only represent old or code space.
+ UNREACHABLE();
+ }
+ DCHECK(free_list != nullptr);
+ intptr_t refilled = 0;
+ while (refilled < kCompactionMemoryWanted) {
+ FreeSpace* node =
+ free_list->TryRemoveMemory(kCompactionMemoryWanted - refilled);
+ if (node == nullptr) return;
+ refilled += node->size();
+ AddMemory(node->address(), node->size());
+ }
+}
+
+
+void PagedSpace::MoveOverFreeMemory(PagedSpace* other) {
+ DCHECK(identity() == other->identity());
+ // Destroy the linear allocation space of {other}. This is needed to
+ // (a) not waste the memory and
+ // (b) keep the rest of the chunk in an iterable state (filler is needed).
+ other->EmptyAllocationInfo();
+
+ // Move over the free list. Concatenate makes sure that the source free list
+ // gets properly reset after moving over all nodes.
+ intptr_t added = free_list_.Concatenate(other->free_list());
+
+ // Moved memory is not recorded as allocated memory, but rather increases and
+ // decreases capacity of the corresponding spaces.
+ other->accounting_stats_.DecreaseCapacity(added);
+ accounting_stats_.IncreaseCapacity(added);
+}
+
+
+void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
+ // Unmerged fields:
+ // area_size_
+ // anchor_
+
+ MoveOverFreeMemory(other);
+
+ // Update and clear accounting statistics.
+ accounting_stats_.Merge(other->accounting_stats_);
+ other->accounting_stats_.Clear();
+
+ // The linear allocation area of {other} should be destroyed now.
+ DCHECK(other->top() == nullptr);
+ DCHECK(other->limit() == nullptr);
+
+ DCHECK(other->end_of_unswept_pages_ == nullptr);
+
+ AccountCommitted(other->CommittedMemory());
+
+ // Move over pages.
+ PageIterator it(other);
+ Page* p = nullptr;
+ while (it.has_next()) {
+ p = it.next();
+ p->Unlink();
+ p->set_owner(this);
+ p->InsertAfter(anchor_.prev_page());
+ }
+}
+
+
size_t PagedSpace::CommittedPhysicalMemory() {
if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
@@ -989,6 +1137,16 @@
}
+bool PagedSpace::ContainsSafe(Address addr) {
+ Page* p = Page::FromAddress(addr);
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ if (iterator.next() == p) return true;
+ }
+ return false;
+}
+
+
Object* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on iterable spaces.
DCHECK(!heap()->mark_compact_collector()->in_use());
@@ -996,7 +1154,7 @@
if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found.
Page* p = Page::FromAddress(addr);
- HeapObjectIterator it(p, NULL);
+ HeapObjectIterator it(p);
for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
Address cur = obj->address();
Address next = cur + obj->Size();
@@ -1008,34 +1166,37 @@
}
-bool PagedSpace::CanExpand() {
- DCHECK(max_capacity_ % AreaSize() == 0);
+bool PagedSpace::CanExpand(size_t size) {
+ DCHECK(heap()->mark_compact_collector()->is_compacting() ||
+ Capacity() <= heap()->MaxOldGenerationSize());
- if (Capacity() == max_capacity_) return false;
-
- DCHECK(Capacity() < max_capacity_);
-
- // Are we going to exceed capacity for this space?
- if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+ // Are we going to exceed capacity for this space? At this point we can be
+ // way over the maximum size because of AlwaysAllocate scopes and large
+ // objects.
+ if (!heap()->CanExpandOldGeneration(static_cast<int>(size))) return false;
return true;
}
bool PagedSpace::Expand() {
- if (!CanExpand()) return false;
-
intptr_t size = AreaSize();
-
- if (anchor_.next_page() == &anchor_) {
- size = SizeOfFirstPage();
+ if (snapshotable() && !HasPages()) {
+ size = Snapshot::SizeOfFirstPage(heap()->isolate(), identity());
}
+ if (!CanExpand(size)) return false;
+
Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
executable());
if (p == NULL) return false;
- DCHECK(Capacity() <= max_capacity_);
+ AccountCommitted(static_cast<intptr_t>(p->size()));
+
+ // Pages created during bootstrapping may contain immortal immovable objects.
+ if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
+
+ DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
p->InsertAfter(anchor_.prev_page());
@@ -1043,48 +1204,6 @@
}
-intptr_t PagedSpace::SizeOfFirstPage() {
- // If using an ool constant pool then transfer the constant pool allowance
- // from the code space to the old pointer space.
- static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
- int size = 0;
- switch (identity()) {
- case OLD_POINTER_SPACE:
- size = (128 + constant_pool_delta) * kPointerSize * KB;
- break;
- case OLD_DATA_SPACE:
- size = 192 * KB;
- break;
- case MAP_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case CELL_SPACE:
- size = 16 * kPointerSize * KB;
- break;
- case PROPERTY_CELL_SPACE:
- size = 8 * kPointerSize * KB;
- break;
- case CODE_SPACE: {
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- // When code range exists, code pages are allocated in a special way
- // (from the reserved code range). That part of the code is not yet
- // upgraded to handle small pages.
- size = AreaSize();
- } else {
- size = RoundUp((480 - constant_pool_delta) * KB *
- FullCodeGenerator::kBootCodeSizeMultiplier / 100,
- kPointerSize);
- }
- break;
- }
- default:
- UNREACHABLE();
- }
- return Min(size, AreaSize());
-}
-
-
int PagedSpace::CountTotalPages() {
PageIterator it(this);
int count = 0;
@@ -1096,14 +1215,6 @@
}
-void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
- sizes->huge_size_ = page->available_in_huge_free_list();
- sizes->small_size_ = page->available_in_small_free_list();
- sizes->medium_size_ = page->available_in_medium_free_list();
- sizes->large_size_ = page->available_in_large_free_list();
-}
-
-
void PagedSpace::ResetFreeListStatistics() {
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
@@ -1126,8 +1237,6 @@
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
DCHECK_EQ(AreaSize(), static_cast<int>(size));
- } else {
- DecreaseUnsweptFreeBytes(page);
}
if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
@@ -1138,8 +1247,7 @@
DCHECK(!free_list_.ContainsPageFreeListItems(page));
if (Page::FromAllocationTop(allocation_info_.top()) == page) {
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
}
// If page is still in a list, unlink it from that list.
@@ -1148,48 +1256,14 @@
page->Unlink();
}
- if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
- heap()->isolate()->memory_allocator()->Free(page);
- } else {
- heap()->QueueMemoryChunkForFree(page);
- }
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
+ heap()->QueueMemoryChunkForFree(page);
DCHECK(Capacity() > 0);
accounting_stats_.ShrinkSpace(AreaSize());
}
-void PagedSpace::CreateEmergencyMemory() {
- if (identity() == CODE_SPACE) {
- // Make the emergency block available to the allocator.
- CodeRange* code_range = heap()->isolate()->code_range();
- if (code_range != NULL && code_range->valid()) {
- code_range->ReleaseEmergencyBlock();
- }
- DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
- }
- emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
- AreaSize(), AreaSize(), executable(), this);
-}
-
-
-void PagedSpace::FreeEmergencyMemory() {
- Page* page = static_cast<Page*>(emergency_memory_);
- DCHECK(page->LiveBytes() == 0);
- DCHECK(AreaSize() == page->area_size());
- DCHECK(!free_list_.ContainsPageFreeListItems(page));
- heap()->isolate()->memory_allocator()->Free(page);
- emergency_memory_ = NULL;
-}
-
-
-void PagedSpace::UseEmergencyMemory() {
- Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
- page->InsertAfter(anchor_.prev_page());
- emergency_memory_ = NULL;
-}
-
-
#ifdef DEBUG
void PagedSpace::Print() {}
#endif
@@ -1206,7 +1280,7 @@
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSwept());
- HeapObjectIterator it(page, NULL);
+ HeapObjectIterator it(page);
Address end_of_previous_object = page->area_start();
Address top = page->area_end();
int black_size = 0;
@@ -1314,17 +1388,15 @@
}
start_ = NULL;
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+ allocation_info_.Reset(nullptr, nullptr);
+
to_space_.TearDown();
from_space_.TearDown();
- LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+ heap()->isolate()->memory_allocator()->FreeNewSpaceMemory(
+ chunk_base_, &reservation_, NOT_EXECUTABLE);
- DCHECK(reservation_.IsReserved());
- heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
- NOT_EXECUTABLE);
chunk_base_ = NULL;
chunk_size_ = 0;
}
@@ -1406,16 +1478,57 @@
}
+void LocalAllocationBuffer::Close() {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(Heap* heap,
+ AllocationInfo allocation_info)
+ : heap_(heap), allocation_info_(allocation_info) {
+ if (IsValid()) {
+ heap_->CreateFillerObjectAt(
+ allocation_info_.top(),
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top()));
+ }
+}
+
+
+LocalAllocationBuffer::LocalAllocationBuffer(
+ const LocalAllocationBuffer& other) {
+ *this = other;
+}
+
+
+LocalAllocationBuffer& LocalAllocationBuffer::operator=(
+ const LocalAllocationBuffer& other) {
+ Close();
+ heap_ = other.heap_;
+ allocation_info_ = other.allocation_info_;
+
+ // This is needed since we (a) cannot yet use move-semantics, (b) want to
+ // make the class easy to use by passing it as a value, and (c) implicitly
+ // call {Close} upon copy.
+ const_cast<LocalAllocationBuffer&>(other)
+ .allocation_info_.Reset(nullptr, nullptr);
+ return *this;
+}
+
+
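// A sketch of the transfer-on-copy semantics above, with a bare [top, limit)
// pair standing in for the real heap_/allocation_info_ state (LabSketch is a
// made-up illustration type, not V8's LocalAllocationBuffer):

#include <cstddef>

class LabSketch {
 public:
  explicit LabSketch(char* top = nullptr, char* limit = nullptr)
      : top_(top), limit_(limit) {}
  bool IsValid() const { return top_ != nullptr; }
  void Close() {
    // The real class writes a filler object over [top_, limit_) here.
    top_ = limit_ = nullptr;
  }
  LabSketch(const LabSketch& other) : top_(nullptr), limit_(nullptr) {
    *this = other;
  }
  LabSketch& operator=(const LabSketch& other) {
    Close();  // give up whatever this buffer still owned
    top_ = other.top_;
    limit_ = other.limit_;
    // Neuter the source so exactly one object owns the range: copy syntax,
    // move semantics (pre-C++11 style, matching the comment in the hunk).
    const_cast<LabSketch&>(other).top_ = nullptr;
    const_cast<LabSketch&>(other).limit_ = nullptr;
    return *this;
  }

 private:
  char* top_;
  char* limit_;
};
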
void NewSpace::UpdateAllocationInfo() {
MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
- allocation_info_.set_top(to_space_.page_low());
- allocation_info_.set_limit(to_space_.page_high());
+ allocation_info_.Reset(to_space_.page_low(), to_space_.page_high());
UpdateInlineAllocationLimit(0);
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::ResetAllocationInfo() {
+ Address old_top = allocation_info_.top();
to_space_.Reset();
UpdateAllocationInfo();
pages_used_ = 0;
@@ -1424,6 +1537,7 @@
while (it.has_next()) {
Bitmap::Clear(it.next());
}
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
}
@@ -1433,14 +1547,15 @@
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
allocation_info_.set_limit(Min(new_top, high));
- } else if (inline_allocation_limit_step() == 0) {
+ } else if (inline_allocation_observers_paused_ ||
+ top_on_previous_step_ == 0) {
// Normal limit is the end of the current page.
allocation_info_.set_limit(to_space_.page_high());
} else {
// Lower limit during incremental marking.
Address high = to_space_.page_high();
Address new_top = allocation_info_.top() + size_in_bytes;
- Address new_limit = new_top + inline_allocation_limit_step_;
+ Address new_limit = new_top + GetNextInlineAllocationStepSize() - 1;
allocation_info_.set_limit(Min(new_limit, high));
}
DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
@@ -1489,33 +1604,114 @@
}
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+bool NewSpace::AddFreshPageSynchronized() {
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ return AddFreshPage();
+}
+
+
+bool NewSpace::EnsureAllocation(int size_in_bytes,
+ AllocationAlignment alignment) {
Address old_top = allocation_info_.top();
Address high = to_space_.page_high();
+ int filler_size = Heap::GetFillToAlign(old_top, alignment);
+ int aligned_size_in_bytes = size_in_bytes + filler_size;
+
+ if (old_top + aligned_size_in_bytes >= high) {
+ // Not enough room in the page, try to allocate a new one.
+ if (!AddFreshPage()) {
+ return false;
+ }
+
+ InlineAllocationStep(old_top, allocation_info_.top(), nullptr, 0);
+
+ old_top = allocation_info_.top();
+ high = to_space_.page_high();
+ filler_size = Heap::GetFillToAlign(old_top, alignment);
+ aligned_size_in_bytes = size_in_bytes + filler_size;
+ }
+
+ DCHECK(old_top + aligned_size_in_bytes < high);
+
if (allocation_info_.limit() < high) {
// Either the limit has been lowered because linear allocation was disabled
- // or because incremental marking wants to get a chance to do a step. Set
- // the new limit accordingly.
- Address new_top = old_top + size_in_bytes;
- int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
- UpdateInlineAllocationLimit(size_in_bytes);
- top_on_previous_step_ = new_top;
- return AllocateRaw(size_in_bytes);
- } else if (AddFreshPage()) {
- // Switched to new page. Try allocating again.
- int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
- heap()->incremental_marking()->Step(bytes_allocated,
- IncrementalMarking::GC_VIA_STACK_GUARD);
- top_on_previous_step_ = to_space_.page_low();
- return AllocateRaw(size_in_bytes);
- } else {
- return AllocationResult::Retry();
+ // or because incremental marking wants to get a chance to do a step,
+ // or because the idle scavenge job wants to get a chance to post a task.
+ // Set the new limit accordingly.
+ Address new_top = old_top + aligned_size_in_bytes;
+ Address soon_object = old_top + filler_size;
+ InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
+ UpdateInlineAllocationLimit(aligned_size_in_bytes);
+ }
+ return true;
+}
+
+
+void NewSpace::StartNextInlineAllocationStep() {
+ if (!inline_allocation_observers_paused_) {
+ top_on_previous_step_ =
+ inline_allocation_observers_.length() ? allocation_info_.top() : 0;
+ UpdateInlineAllocationLimit(0);
}
}
+intptr_t NewSpace::GetNextInlineAllocationStepSize() {
+ intptr_t next_step = 0;
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ InlineAllocationObserver* o = inline_allocation_observers_[i];
+ next_step = next_step ? Min(next_step, o->bytes_to_next_step())
+ : o->bytes_to_next_step();
+ }
+ DCHECK(inline_allocation_observers_.length() == 0 || next_step != 0);
+ return next_step;
+}
+
+
+void NewSpace::AddInlineAllocationObserver(InlineAllocationObserver* observer) {
+ inline_allocation_observers_.Add(observer);
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::RemoveInlineAllocationObserver(
+ InlineAllocationObserver* observer) {
+ bool removed = inline_allocation_observers_.RemoveElement(observer);
+ // Only used in assertion. Suppress unused variable warning.
+ static_cast<void>(removed);
+ DCHECK(removed);
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::PauseInlineAllocationObservers() {
+ // Do a step to account for memory allocated so far.
+ InlineAllocationStep(top(), top(), nullptr, 0);
+ inline_allocation_observers_paused_ = true;
+ top_on_previous_step_ = 0;
+ UpdateInlineAllocationLimit(0);
+}
+
+
+void NewSpace::ResumeInlineAllocationObservers() {
+ DCHECK(top_on_previous_step_ == 0);
+ inline_allocation_observers_paused_ = false;
+ StartNextInlineAllocationStep();
+}
+
+
+void NewSpace::InlineAllocationStep(Address top, Address new_top,
+ Address soon_object, size_t size) {
+ if (top_on_previous_step_) {
+ int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
+ for (int i = 0; i < inline_allocation_observers_.length(); ++i) {
+ inline_allocation_observers_[i]->InlineAllocationStep(bytes_allocated,
+ soon_object, size);
+ }
+ top_on_previous_step_ = new_top;
+ }
+}
+
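// A reduced model of the observer machinery above (names made up): each
// observer asks to be stepped every `bytes_to_next_step()` bytes, and the
// space lowers its allocation limit to the smallest outstanding step so the
// slow path runs at the right allocation boundary.

#include <algorithm>
#include <vector>

class ObserverSketch {
 public:
  explicit ObserverSketch(long step_size) : step_size_(step_size) {}
  virtual ~ObserverSketch() = default;
  long bytes_to_next_step() const { return step_size_; }
  // Called from the equivalent of InlineAllocationStep().
  virtual void Step(long bytes_allocated) = 0;

 private:
  long step_size_;
};

// Mirrors GetNextInlineAllocationStepSize(): the minimum over all observers,
// with 0 meaning "no observer installed".
inline long NextStepSize(const std::vector<ObserverSketch*>& observers) {
  long next = 0;
  for (const ObserverSketch* o : observers) {
    next = next ? std::min(next, o->bytes_to_next_step())
                : o->bytes_to_next_step();
  }
  return next;
}
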
#ifdef VERIFY_HEAP
// We do not use the SemiSpaceIterator because verification doesn't assume
// that it works (it depends on the invariants we are checking).
@@ -1590,13 +1786,12 @@
total_capacity_ = initial_capacity;
target_capacity_ = RoundDown(target_capacity, Page::kPageSize);
maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
- maximum_committed_ = 0;
committed_ = false;
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
- age_mark_ = start_;
+ age_mark_ = start_ + NewSpacePage::kObjectStartOffset;
}
@@ -1613,6 +1808,7 @@
start_, total_capacity_, executable())) {
return false;
}
+ AccountCommitted(total_capacity_);
NewSpacePage* current = anchor();
for (int i = 0; i < pages; i++) {
@@ -1636,6 +1832,8 @@
total_capacity_)) {
return false;
}
+ AccountUncommitted(total_capacity_);
+
anchor()->set_next_page(anchor());
anchor()->set_prev_page(anchor());
@@ -1672,6 +1870,7 @@
start_ + total_capacity_, delta, executable())) {
return false;
}
+ AccountCommitted(static_cast<intptr_t>(delta));
SetCapacity(new_capacity);
NewSpacePage* last_page = anchor()->prev_page();
DCHECK(last_page != anchor());
@@ -1702,6 +1901,7 @@
if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
return false;
}
+ AccountUncommitted(static_cast<intptr_t>(delta));
int pages_after = new_capacity / Page::kPageSize;
NewSpacePage* new_last_page =
@@ -1787,9 +1987,6 @@
void SemiSpace::SetCapacity(int new_capacity) {
total_capacity_ = new_capacity;
- if (total_capacity_ > maximum_committed_) {
- maximum_committed_ = total_capacity_;
- }
}
@@ -1864,33 +2061,16 @@
// -----------------------------------------------------------------------------
// SemiSpaceIterator implementation.
+
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
- Initialize(space->bottom(), space->top(), NULL);
+ Initialize(space->bottom(), space->top());
}
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
- HeapObjectCallback size_func) {
- Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
- Initialize(start, space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
- Initialize(from, to, NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(Address start, Address end,
- HeapObjectCallback size_func) {
+void SemiSpaceIterator::Initialize(Address start, Address end) {
SemiSpace::AssertValidRange(start, end);
current_ = start;
limit_ = end;
- size_func_ = size_func;
}
@@ -2084,87 +2264,9 @@
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
- DCHECK(size_in_bytes > 0);
- DCHECK(IsAligned(size_in_bytes, kPointerSize));
-
- // We write a map and possibly size information to the block. If the block
- // is big enough to be a FreeSpace with at least one extra word (the next
- // pointer), we set its map to be the free space map and its size to an
- // appropriate array length for the desired size from HeapObject::Size().
- // If the block is too small (eg, one or two words), to hold both a size
- // field and a next pointer, we give it a filler map that gives it the
- // correct size.
- if (size_in_bytes > FreeSpace::kHeaderSize) {
- // Can't use FreeSpace::cast because it fails during deserialization.
- // We have to set the size first with a release store before we store
- // the map because a concurrent store buffer scan on scavenge must not
- // observe a map with an invalid size.
- FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
- this_as_free_space->nobarrier_set_size(size_in_bytes);
- synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
- } else if (size_in_bytes == kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
- } else if (size_in_bytes == 2 * kPointerSize) {
- set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
- } else {
- UNREACHABLE();
- }
- // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
- // deserialization because the free space map is not done yet.
-}
-
-
-FreeListNode* FreeListNode::next() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kNextOffset));
- } else {
- return reinterpret_cast<FreeListNode*>(
- Memory::Address_at(address() + kPointerSize));
- }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
- DCHECK(IsFreeListNode(this));
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(Size() >= kNextOffset + kPointerSize);
- return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
- } else {
- return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
- }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
- DCHECK(IsFreeListNode(this));
- // While we are booting the VM the free space map will actually be null. So
- // we have to make sure that we don't try to use it for anything at that
- // stage.
- if (map() == GetHeap()->raw_unchecked_free_space_map()) {
- DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
- reinterpret_cast<base::AtomicWord>(next));
- } else {
- base::NoBarrier_Store(
- reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
- reinterpret_cast<base::AtomicWord>(next));
- }
-}
-
-
intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
intptr_t free_bytes = 0;
if (category->top() != NULL) {
- // This is safe (not going to deadlock) since Concatenate operations
- // are never performed on the same free lists at the same time in
- // reverse order.
- base::LockGuard<base::Mutex> target_lock_guard(mutex());
- base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
DCHECK(category->end_ != NULL);
free_bytes = category->available();
if (end_ == NULL) {
@@ -2173,7 +2275,6 @@
category->end()->set_next(top());
}
set_top(category->top());
- base::NoBarrier_Store(&top_, category->top_);
available_ += category->available();
category->Reset();
}
@@ -2182,36 +2283,45 @@
void FreeListCategory::Reset() {
- set_top(NULL);
- set_end(NULL);
- set_available(0);
+ set_top(nullptr);
+ set_end(nullptr);
+ available_ = 0;
}
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeListNode* t = top();
- FreeListNode** n = &t;
- while (*n != NULL) {
- if (Page::FromAddress((*n)->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
- sum += free_space->Size();
- *n = (*n)->next();
- } else {
- n = (*n)->next_address();
+ intptr_t sum = 0;
+ FreeSpace* prev_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+ if (page_for_node == p) {
+ // FreeSpace node on eviction page found, unlink it.
+ int size = cur_node->size();
+ sum += size;
+ DCHECK((prev_node != nullptr) || (top() == cur_node));
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_node);
+ }
+ if (prev_node != nullptr) {
+ prev_node->set_next(cur_node->next());
+ }
+ continue;
}
+ prev_node = cur_node;
}
- set_top(t);
- if (top() == NULL) {
- set_end(NULL);
- }
+ DCHECK_EQ(p->available_in_free_list(type_), sum);
+ p->add_available_in_free_list(type_, -sum);
available_ -= sum;
return sum;
}
bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
- FreeListNode* node = top();
+ FreeSpace* node = top();
while (node != NULL) {
if (Page::FromAddress(node->address()) == p) return true;
node = node->next();
@@ -2220,57 +2330,94 @@
}
-FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
- FreeListNode* node = top();
+FreeSpace* FreeListCategory::PickNodeFromList(int* node_size) {
+ FreeSpace* node = top();
+ if (node == nullptr) return nullptr;
- if (node == NULL) return NULL;
-
- while (node != NULL &&
- Page::FromAddress(node->address())->IsEvacuationCandidate()) {
- available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+ Page* page = Page::FromAddress(node->address());
+ while ((node != nullptr) && !page->CanAllocate()) {
+ available_ -= node->size();
+ page->add_available_in_free_list(type_, -(node->Size()));
node = node->next();
}
- if (node != NULL) {
+ if (node != nullptr) {
set_top(node->next());
- *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+ *node_size = node->Size();
available_ -= *node_size;
} else {
- set_top(NULL);
+ set_top(nullptr);
}
- if (top() == NULL) {
- set_end(NULL);
+ if (top() == nullptr) {
+ set_end(nullptr);
}
return node;
}
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
- int* node_size) {
- FreeListNode* node = PickNodeFromList(node_size);
- if (node != NULL && *node_size < size_in_bytes) {
+FreeSpace* FreeListCategory::PickNodeFromList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* node = PickNodeFromList(node_size);
+ if ((node != nullptr) && (*node_size < size_in_bytes)) {
Free(node, *node_size);
*node_size = 0;
- return NULL;
+ return nullptr;
}
return node;
}
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
- node->set_next(top());
- set_top(node);
+FreeSpace* FreeListCategory::SearchForNodeInList(int size_in_bytes,
+ int* node_size) {
+ FreeSpace* prev_non_evac_node = nullptr;
+ for (FreeSpace* cur_node = top(); cur_node != nullptr;
+ cur_node = cur_node->next()) {
+ int size = cur_node->size();
+ Page* page_for_node = Page::FromAddress(cur_node->address());
+
+ if ((size >= size_in_bytes) || !page_for_node->CanAllocate()) {
+ // The node is either large enough or contained in an evacuation
+ // candidate. In both cases we need to unlink it from the list.
+ available_ -= size;
+ if (cur_node == top()) {
+ set_top(cur_node->next());
+ }
+ if (cur_node == end()) {
+ set_end(prev_non_evac_node);
+ }
+ if (prev_non_evac_node != nullptr) {
+ prev_non_evac_node->set_next(cur_node->next());
+ }
+ // For evacuation candidates we continue.
+ if (!page_for_node->CanAllocate()) {
+ page_for_node->add_available_in_free_list(type_, -size);
+ continue;
+ }
+ // Otherwise we have a large enough node and can return.
+ *node_size = size;
+ return cur_node;
+ }
+
+ prev_non_evac_node = cur_node;
+ }
+ return nullptr;
+}
+
+
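// Both SearchForNodeInList above and EvictFreeListItemsInList earlier use the
// same unlink-while-iterating pattern; here it is in isolation, on a bare
// singly linked list (NodeSketch is a made-up type):

struct NodeSketch {
  int size;
  NodeSketch* next;
};

// Removes and returns the first node satisfying `doomed`, carrying a trailing
// `prev` pointer so head, middle, and tail unlinks all stay consistent.
inline NodeSketch* UnlinkFirst(NodeSketch** head,
                               bool (*doomed)(const NodeSketch*)) {
  NodeSketch* prev = nullptr;
  for (NodeSketch* cur = *head; cur != nullptr; cur = cur->next) {
    if (doomed(cur)) {
      if (prev == nullptr) {
        *head = cur->next;       // unlinking the head
      } else {
        prev->next = cur->next;  // unlinking a middle or tail node
      }
      return cur;
    }
    prev = cur;
  }
  return nullptr;
}
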
+void FreeListCategory::Free(FreeSpace* free_space, int size_in_bytes) {
+ free_space->set_next(top());
+ set_top(free_space);
if (end_ == NULL) {
- end_ = node;
+ end_ = free_space;
}
available_ += size_in_bytes;
}
void FreeListCategory::RepairFreeList(Heap* heap) {
- FreeListNode* n = top();
+ FreeSpace* n = top();
while (n != NULL) {
Map** map_location = reinterpret_cast<Map**>(n->address());
if (*map_location == NULL) {
@@ -2283,18 +2430,40 @@
}
-FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+FreeList::FreeList(PagedSpace* owner)
+ : owner_(owner),
+ wasted_bytes_(0),
+ small_list_(this, kSmall),
+ medium_list_(this, kMedium),
+ large_list_(this, kLarge),
+ huge_list_(this, kHuge) {
Reset();
}
-intptr_t FreeList::Concatenate(FreeList* free_list) {
- intptr_t free_bytes = 0;
- free_bytes += small_list_.Concatenate(free_list->small_list());
- free_bytes += medium_list_.Concatenate(free_list->medium_list());
- free_bytes += large_list_.Concatenate(free_list->large_list());
- free_bytes += huge_list_.Concatenate(free_list->huge_list());
- return free_bytes;
+intptr_t FreeList::Concatenate(FreeList* other) {
+ intptr_t usable_bytes = 0;
+ intptr_t wasted_bytes = 0;
+
+ // This is safe (not going to deadlock) since Concatenate operations
+ // are never performed on the same free lists at the same time in
+ // reverse order. Furthermore, we only lock if the PagedSpace containing
+ // the free list is known to be globally available, i.e., not local.
+ if (!owner()->is_local()) mutex_.Lock();
+ if (!other->owner()->is_local()) other->mutex()->Lock();
+
+ wasted_bytes = other->wasted_bytes_;
+ wasted_bytes_ += wasted_bytes;
+ other->wasted_bytes_ = 0;
+
+ usable_bytes += small_list_.Concatenate(other->GetFreeListCategory(kSmall));
+ usable_bytes += medium_list_.Concatenate(other->GetFreeListCategory(kMedium));
+ usable_bytes += large_list_.Concatenate(other->GetFreeListCategory(kLarge));
+ usable_bytes += huge_list_.Concatenate(other->GetFreeListCategory(kHuge));
+
+ if (!other->owner()->is_local()) other->mutex()->Unlock();
+ if (!owner()->is_local()) mutex_.Unlock();
+ return usable_bytes + wasted_bytes;
}
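
// A sketch of the conditional locking above (ListSketch is a made-up type):
// only globally shared free lists take their mutex; thread-local compaction
// lists skip it. The fixed this-then-other lock order, never taken in reverse
// concurrently (per the comment above), is what keeps the pair deadlock-free.
#include <mutex>

struct ListSketch {
  bool is_local = false;
  std::mutex mutex;
  long bytes = 0;
};

inline long ConcatenateSketch(ListSketch* to, ListSketch* from) {
  if (!to->is_local) to->mutex.lock();
  if (!from->is_local) from->mutex.lock();
  long moved = from->bytes;  // models moving the category nodes over
  to->bytes += moved;
  from->bytes = 0;
  if (!from->is_local) from->mutex.unlock();
  if (!to->is_local) to->mutex.unlock();
  return moved;
}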
@@ -2303,123 +2472,81 @@
medium_list_.Reset();
large_list_.Reset();
huge_list_.Reset();
+ ResetStats();
}
int FreeList::Free(Address start, int size_in_bytes) {
if (size_in_bytes == 0) return 0;
- FreeListNode* node = FreeListNode::FromAddress(start);
- node->set_size(heap_, size_in_bytes);
+ owner()->heap()->CreateFillerObjectAt(start, size_in_bytes);
+
Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) {
+ if (size_in_bytes <= kSmallListMin) {
page->add_non_available_small_blocks(size_in_bytes);
+ wasted_bytes_ += size_in_bytes;
return size_in_bytes;
}
+ FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
- small_list_.Free(node, size_in_bytes);
+ small_list_.Free(free_space, size_in_bytes);
page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
- medium_list_.Free(node, size_in_bytes);
+ medium_list_.Free(free_space, size_in_bytes);
page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
- large_list_.Free(node, size_in_bytes);
+ large_list_.Free(free_space, size_in_bytes);
page->add_available_in_large_free_list(size_in_bytes);
} else {
- huge_list_.Free(node, size_in_bytes);
+ huge_list_.Free(free_space, size_in_bytes);
page->add_available_in_huge_free_list(size_in_bytes);
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return 0;
}
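
// The size-class routing above, reduced to its shape. The threshold values
// here are placeholders for illustration, not V8's actual kSmallListMin and
// k*ListMax constants:
enum class CategorySketch { kWasted, kSmall, kMedium, kLarge, kHuge };

inline CategorySketch Categorize(int size_in_bytes) {
  const int kMinSketch = 24;  // at or below: dropped and counted as waste
  const int kSmallMaxSketch = 256;
  const int kMediumMaxSketch = 2048;
  const int kLargeMaxSketch = 16384;
  if (size_in_bytes <= kMinSketch) return CategorySketch::kWasted;
  if (size_in_bytes <= kSmallMaxSketch) return CategorySketch::kSmall;
  if (size_in_bytes <= kMediumMaxSketch) return CategorySketch::kMedium;
  if (size_in_bytes <= kLargeMaxSketch) return CategorySketch::kLarge;
  return CategorySketch::kHuge;
}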
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
- FreeListNode* node = NULL;
- Page* page = NULL;
+FreeSpace* FreeList::FindNodeIn(FreeListCategoryType category, int* node_size) {
+ FreeSpace* node = GetFreeListCategory(category)->PickNodeFromList(node_size);
+ if (node != nullptr) {
+ Page::FromAddress(node->address())
+ ->add_available_in_free_list(category, -(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ }
+ return node;
+}
+
+
+FreeSpace* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+ FreeSpace* node = nullptr;
+ Page* page = nullptr;
if (size_in_bytes <= kSmallAllocationMax) {
- node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_small_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kSmall, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kMediumAllocationMax) {
- node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_medium_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kMedium, node_size);
+ if (node != nullptr) return node;
}
if (size_in_bytes <= kLargeAllocationMax) {
- node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) {
- DCHECK(size_in_bytes <= *node_size);
- page = Page::FromAddress(node->address());
- page->add_available_in_large_free_list(-(*node_size));
- DCHECK(IsVeryLong() || available() == SumFreeLists());
- return node;
- }
+ node = FindNodeIn(kLarge, node_size);
+ if (node != nullptr) return node;
}
- int huge_list_available = huge_list_.available();
- FreeListNode* top_node = huge_list_.top();
- for (FreeListNode** cur = &top_node; *cur != NULL;
- cur = (*cur)->next_address()) {
- FreeListNode* cur_node = *cur;
- while (cur_node != NULL &&
- Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
- huge_list_available -= size;
- page = Page::FromAddress(cur_node->address());
- page->add_available_in_huge_free_list(-size);
- cur_node = cur_node->next();
- }
-
- *cur = cur_node;
- if (cur_node == NULL) {
- huge_list_.set_end(NULL);
- break;
- }
-
- DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
- int size = cur_as_free_space->Size();
- if (size >= size_in_bytes) {
- // Large enough node found. Unlink it from the list.
- node = *cur;
- *cur = node->next();
- *node_size = size;
- huge_list_available -= size;
- page = Page::FromAddress(node->address());
- page->add_available_in_huge_free_list(-size);
- break;
- }
- }
-
- huge_list_.set_top(top_node);
- if (huge_list_.top() == NULL) {
- huge_list_.set_end(NULL);
- }
- huge_list_.set_available(huge_list_available);
-
- if (node != NULL) {
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ node = huge_list_.SearchForNodeInList(size_in_bytes, node_size);
+ if (node != nullptr) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
return node;
}
@@ -2446,7 +2573,38 @@
}
}
- DCHECK(IsVeryLong() || available() == SumFreeLists());
+ DCHECK(IsVeryLong() || Available() == SumFreeLists());
+ return node;
+}
+
+
+FreeSpace* FreeList::TryRemoveMemory(intptr_t hint_size_in_bytes) {
+ hint_size_in_bytes = RoundDown(hint_size_in_bytes, kPointerSize);
+ base::LockGuard<base::Mutex> guard(&mutex_);
+ FreeSpace* node = nullptr;
+ int node_size = 0;
+ // Try to find a node that fits exactly.
+ node = FindNodeFor(static_cast<int>(hint_size_in_bytes), &node_size);
+ // If no node could be found, get as much memory as possible.
+ if (node == nullptr) node = FindNodeIn(kHuge, &node_size);
+ if (node == nullptr) node = FindNodeIn(kLarge, &node_size);
+ if (node != nullptr) {
+ // We round up the size to (kSmallListMin + kPointerSize) to (a) have a
+ // size larger than the minimum size required for FreeSpace, and (b) get
+ // a block that can actually be freed into some FreeList later on.
+ if (hint_size_in_bytes <= kSmallListMin) {
+ hint_size_in_bytes = kSmallListMin + kPointerSize;
+ }
+ // Give back leftovers that were not required by {hint_size_in_bytes}.
+ intptr_t left_over = node_size - hint_size_in_bytes;
+
+ // Do not bother to return anything below {kSmallListMin} as it would be
+ // immediately discarded anyway.
+ if (left_over > kSmallListMin) {
+ Free(node->address() + hint_size_in_bytes, static_cast<int>(left_over));
+ node->set_size(static_cast<int>(hint_size_in_bytes));
+ }
+ }
return node;
}
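
// Shape of the split in TryRemoveMemory above, with illustrative numbers:
//   hint = 96, node found of size 160  -> keep 96, Free() the trailing 64
//   hint = 8 (<= kSmallListMin)        -> hint is first rounded up so the
//                                         kept block is still a valid
//                                         FreeSpace object
// Leftovers of kSmallListMin bytes or fewer stay attached to the returned
// node, since Free() would immediately discard them as waste.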
@@ -2467,16 +2625,14 @@
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->Free(owner_->top(), old_linear_size);
+ owner_->SetTopAndLimit(nullptr, nullptr);
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
old_linear_size);
int new_node_size = 0;
- FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
- if (new_node == NULL) {
- owner_->SetTopAndLimit(NULL, NULL);
- return NULL;
- }
+ FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+ if (new_node == nullptr) return nullptr;
int bytes_left = new_node_size - size_in_bytes;
DCHECK(bytes_left >= 0);
@@ -2506,7 +2662,7 @@
DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
- FLAG_incremental_marking_steps) {
+ FLAG_incremental_marking) {
int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
// We don't want to give too large linear areas to the allocator while
// incremental marking is going on, because we won't check again whether
@@ -2520,10 +2676,6 @@
// linear allocation area.
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
- } else {
- // TODO(gc) Try not freeing linear allocation region when bytes_left
- // are zero.
- owner_->SetTopAndLimit(NULL, NULL);
}
return new_node;
@@ -2532,17 +2684,11 @@
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
- p->set_available_in_huge_free_list(0);
-
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
medium_list_.EvictFreeListItemsInList(p) +
large_list_.EvictFreeListItemsInList(p);
- p->set_available_in_small_free_list(0);
- p->set_available_in_medium_free_list(0);
- p->set_available_in_large_free_list(0);
}
-
return sum;
}
@@ -2566,23 +2712,19 @@
#ifdef DEBUG
intptr_t FreeListCategory::SumFreeList() {
intptr_t sum = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
- DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
- FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
- sum += cur_as_free_space->nobarrier_size();
+ DCHECK(cur->map() == cur->GetHeap()->root(Heap::kFreeSpaceMapRootIndex));
+ sum += cur->nobarrier_size();
cur = cur->next();
}
return sum;
}
-static const int kVeryLongFreeList = 500;
-
-
int FreeListCategory::FreeListLength() {
int length = 0;
- FreeListNode* cur = top();
+ FreeSpace* cur = top();
while (cur != NULL) {
length++;
cur = cur->next();
@@ -2592,12 +2734,14 @@
}
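+// Note: this assumes FreeListLength() saturates at kVeryLongFreeList, so
+// equality means the list has at least kVeryLongFreeList entries.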
+bool FreeListCategory::IsVeryLong() {
+ return FreeListLength() == kVeryLongFreeList;
+}
+
+
bool FreeList::IsVeryLong() {
- if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
- if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
- return false;
+ return small_list_.IsVeryLong() || medium_list_.IsVeryLong() ||
+ large_list_.IsVeryLong() || huge_list_.IsVeryLong();
}
@@ -2622,20 +2766,17 @@
// on the first allocation after the sweep.
EmptyAllocationInfo();
- // This counter will be increased for pages which will be swept by the
- // sweeper threads.
- unswept_free_bytes_ = 0;
-
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
}
intptr_t PagedSpace::SizeOfObjects() {
- DCHECK(!FLAG_concurrent_sweeping ||
- heap()->mark_compact_collector()->sweeping_in_progress() ||
- (unswept_free_bytes_ == 0));
- return Size() - unswept_free_bytes_ - (limit() - top());
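+ // The area between top and limit is unused linear allocation space, so it
+ // is excluded from the size of live objects.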
+ const intptr_t size = Size() - (limit() - top());
+ CHECK_GE(limit(), top());
+ CHECK_GE(size, 0);
+ USE(size);
+ return size;
}
@@ -2643,27 +2784,35 @@
// on the heap. If there was already a free list, then the elements on it
// were created with the wrong FreeSpaceMap (normally NULL), so we need to
// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
-
-
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top() >= allocation_info_.limit()) return;
-
- if (Page::FromAllocationTop(allocation_info_.top())
- ->IsEvacuationCandidate()) {
- // Create filler object to keep page iterable if it was iterable.
- int remaining =
- static_cast<int>(allocation_info_.limit() - allocation_info_.top());
- heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
- allocation_info_.set_top(NULL);
- allocation_info_.set_limit(NULL);
+void PagedSpace::RepairFreeListsAfterDeserialization() {
+ free_list_.RepairLists(heap());
+ // Each page may have a small free space that is not tracked by a free list.
+ // Update the maps for those free space objects.
+ PageIterator iterator(this);
+ while (iterator.has_next()) {
+ Page* page = iterator.next();
+ int size = static_cast<int>(page->non_available_small_blocks());
+ if (size == 0) continue;
+ Address address = page->OffsetToAddress(Page::kPageSize - size);
+ heap()->CreateFillerObjectAt(address, size);
}
}
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
- int size_in_bytes) {
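+// If the linear allocation area lies on a page that can no longer be
+// allocated on, seal the unused remainder with a filler object.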
+void PagedSpace::EvictEvacuationCandidatesFromLinearAllocationArea() {
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
+
+ if (!Page::FromAllocationTop(allocation_info_.top())->CanAllocate()) {
+ // Create filler object to keep page iterable if it was iterable.
+ int remaining =
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+ allocation_info_.Reset(nullptr, nullptr);
+ }
+}
+
+
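+// Completes sweeping on the main thread and then retries the free-list
+// allocation, which may now succeed due to newly swept memory.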
+HeapObject* PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
MarkCompactCollector* collector = heap()->mark_compact_collector();
if (collector->sweeping_in_progress()) {
// Wait for the sweeper threads here and complete the sweeping phase.
@@ -2673,7 +2822,17 @@
// entries.
return free_list_.Allocate(size_in_bytes);
}
- return NULL;
+ return nullptr;
+}
+
+
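+// The compaction-space variant does not complete sweeping; SweepAndRefill is
+// expected to sweep only as much as is needed to refill this space.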
+HeapObject* CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
+ MarkCompactCollector* collector = heap()->mark_compact_collector();
+ if (collector->sweeping_in_progress()) {
+ collector->SweepAndRefill(this);
+ return free_list_.Allocate(size_in_bytes);
+ }
+ return nullptr;
}
@@ -2685,22 +2844,17 @@
if (collector->sweeping_in_progress()) {
// First try to refill the free-list; concurrent sweeper threads
// may have freed some objects in the meantime.
- collector->RefillFreeList(this);
+ RefillFreeList();
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
// If sweeping is still in progress, try to sweep pages on the main thread.
- int free_chunk = collector->SweepInParallel(this, size_in_bytes);
- collector->RefillFreeList(this);
- if (free_chunk >= size_in_bytes) {
- HeapObject* object = free_list_.Allocate(size_in_bytes);
- // We should be able to allocate an object here since we just freed that
- // much memory.
- DCHECK(object != NULL);
- if (object != NULL) return object;
- }
+ collector->SweepInParallel(heap()->paged_space(identity()), size_in_bytes);
+ RefillFreeList();
+ object = free_list_.Allocate(size_in_bytes);
+ if (object != nullptr) return object;
}
// Free list allocation failed and there is no next page. Fail if we have
@@ -2710,20 +2864,21 @@
heap()->OldGenerationAllocationLimitReached()) {
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists.
- HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
- if (object != NULL) return object;
+ HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
+ return object;
}
// Try to expand the space and allocate in the new next page.
if (Expand()) {
- DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
+ DCHECK((CountTotalPages() > 1) ||
+ (size_in_bytes <= free_list_.Available()));
return free_list_.Allocate(size_in_bytes);
}
// If sweeper threads are active, wait for them at that point and steal
// elements from their free-lists. Allocation may still fail then, which
// would indicate that there is not enough memory for the given allocation.
- return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+ return SweepAndRetryAllocation(size_in_bytes);
}
@@ -2876,25 +3031,10 @@
// -----------------------------------------------------------------------------
// MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
+#ifdef VERIFY_HEAP
void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
-
-
-// -----------------------------------------------------------------------------
-// CellSpace and PropertyCellSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
-
-
-void PropertyCellSpace::VerifyObject(HeapObject* object) {
- CHECK(object->IsPropertyCell());
-}
+#endif
// -----------------------------------------------------------------------------
@@ -2902,14 +3042,6 @@
LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
current_ = space->first_page_;
- size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
- HeapObjectCallback size_func) {
- current_ = space->first_page_;
- size_func_ = size_func;
}
@@ -2924,24 +3056,23 @@
// -----------------------------------------------------------------------------
// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
-LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
- AllocationSpace id)
+LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
: Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
- max_capacity_(max_capacity),
first_page_(NULL),
size_(0),
page_count_(0),
objects_size_(0),
- chunk_map_(ComparePointers, 1024) {}
+ chunk_map_(HashMap::PointersMatch, 1024) {}
+
+
+LargeObjectSpace::~LargeObjectSpace() {}
bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
- maximum_committed_ = 0;
page_count_ = 0;
objects_size_ = 0;
chunk_map_.Clear();
@@ -2968,35 +3099,29 @@
Executability executable) {
// Check if we want to force a GC before growing the old space further.
// If so, fail the allocation.
- if (!heap()->always_allocate() &&
- heap()->OldGenerationAllocationLimitReached()) {
+ if (!heap()->CanExpandOldGeneration(object_size)) {
return AllocationResult::Retry(identity());
}
- if (!CanAllocateSize(object_size)) return AllocationResult::Retry(identity());
-
LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
object_size, this, executable);
if (page == NULL) return AllocationResult::Retry(identity());
DCHECK(page->area_size() >= object_size);
size_ += static_cast<int>(page->size());
+ AccountCommitted(static_cast<intptr_t>(page->size()));
objects_size_ += object_size;
page_count_++;
page->set_next_page(first_page_);
first_page_ = page;
- if (size_ > maximum_committed_) {
- maximum_committed_ = size_;
- }
-
// Register all MemoryChunk::kAlignment-aligned chunks covered by
// this large page in the chunk map.
uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
for (uintptr_t key = base; key <= limit; key++) {
- HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key), true);
+ HashMap::Entry* entry = chunk_map_.LookupOrInsert(
+ reinterpret_cast<void*>(key), static_cast<uint32_t>(key));
DCHECK(entry != NULL);
entry->value = page;
}
@@ -3043,7 +3168,7 @@
LargePage* LargeObjectSpace::FindPage(Address a) {
uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
- static_cast<uint32_t>(key), false);
+ static_cast<uint32_t>(key));
if (e != NULL) {
DCHECK(e->value != NULL);
LargePage* page = reinterpret_cast<LargePage*>(e->value);
@@ -3056,19 +3181,28 @@
}
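+// Large objects that survive marking are black. Reset them to white and
+// clear the per-page progress bar and live-byte counts so the next marking
+// cycle starts from a clean state.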
+void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ HeapObject* object = current->GetObject();
+ MarkBit mark_bit = Marking::MarkBitFrom(object);
+ DCHECK(Marking::IsBlack(mark_bit));
+ Marking::BlackToWhite(mark_bit);
+ Page::FromAddress(object->address())->ResetProgressBar();
+ Page::FromAddress(object->address())->ResetLiveBytes();
+ current = current->next_page();
+ }
+}
+
+
void LargeObjectSpace::FreeUnmarkedObjects() {
LargePage* previous = NULL;
LargePage* current = first_page_;
while (current != NULL) {
HeapObject* object = current->GetObject();
- // Can this large page contain pointers to non-trivial objects. No other
- // pointer object is this big.
- bool is_pointer_object = object->IsFixedArray();
MarkBit mark_bit = Marking::MarkBitFrom(object);
- if (mark_bit.Get()) {
- mark_bit.Clear();
- Page::FromAddress(object->address())->ResetProgressBar();
- Page::FromAddress(object->address())->ResetLiveBytes();
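+ // After marking, a large object is either black (live) or white (dead);
+ // grey would mean marking is still in progress.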
+ DCHECK(!Marking::IsGrey(mark_bit));
+ if (Marking::IsBlack(mark_bit)) {
previous = current;
current = current->next_page();
} else {
@@ -3085,6 +3219,7 @@
heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
heap()->isolate());
size_ -= static_cast<int>(page->size());
+ AccountUncommitted(static_cast<intptr_t>(page->size()));
objects_size_ -= object->Size();
page_count_--;
@@ -3099,14 +3234,9 @@
static_cast<uint32_t>(key));
}
- if (is_pointer_object) {
- heap()->QueueMemoryChunkForFree(page);
- } else {
- heap()->isolate()->memory_allocator()->Free(page);
- }
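+ // Freed pages are only queued here; the heap is expected to release them
+ // later via FreeQueuedChunks().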
+ heap()->QueueMemoryChunkForFree(page);
}
}
- heap()->FreeQueuedChunks();
}
@@ -3122,6 +3252,11 @@
}
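+// Containment is answered through the chunk map, making this a hash lookup
+// rather than a walk over all large pages.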
+bool LargeObjectSpace::Contains(Address address) {
+ return FindPage(address) != NULL;
+}
+
+
#ifdef VERIFY_HEAP
// We do not assume that the large object iterator works, because it depends
// on the invariants we are checking during verification.
@@ -3146,8 +3281,7 @@
// large object space.
CHECK(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
- object->IsFixedDoubleArray() || object->IsByteArray() ||
- object->IsConstantPoolArray());
+ object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
object->ObjectVerify();
@@ -3217,14 +3351,14 @@
PrintF("Page@%p in %s\n", this->address(),
AllocationSpaceName(this->owner()->identity()));
printf(" --------------------------------------\n");
- HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+ HeapObjectIterator objects(this);
unsigned mark_size = 0;
for (HeapObject* object = objects.Next(); object != NULL;
object = objects.Next()) {
- bool is_marked = Marking::MarkBitFrom(object).Get();
+ bool is_marked = Marking::IsBlackOrGrey(Marking::MarkBitFrom(object));
PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little.
if (is_marked) {
- mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+ mark_size += object->Size();
}
object->ShortPrint();
PrintF("\n");
@@ -3234,5 +3368,5 @@
}
#endif // DEBUG
-}
-} // namespace v8::internal
+} // namespace internal
+} // namespace v8