Merge V8 at 3.8.9.11
Bug: 5688872
Change-Id: Ie3b1dd67a730ec5e82686b7b37dba26f6a9bb24f
diff --git a/src/spaces.cc b/src/spaces.cc
index 1ee3359..de738fb 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -132,7 +132,7 @@
}
-bool CodeRange::Setup(const size_t requested) {
+bool CodeRange::SetUp(const size_t requested) {
ASSERT(code_range_ == NULL);
code_range_ = new VirtualMemory(requested);
@@ -270,7 +270,7 @@
}
-bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
capacity_ = RoundUp(capacity, Page::kPageSize);
capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
ASSERT_GE(capacity_, capacity_executable_);
@@ -750,6 +750,17 @@
// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+ MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+ if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+ static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+ }
+ chunk->IncrementLiveBytes(by);
+}
+
+// -----------------------------------------------------------------------------
// PagedSpace implementation
PagedSpace::PagedSpace(Heap* heap,
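[The new IncrementLiveBytesFromMutator entry point keeps the owning space's unswept-free-bytes estimate consistent: if the mutator adds live bytes on a page the lazy sweeper has not visited yet, the sweeper will recover that much less free space. A minimal standalone sketch of the bookkeeping idea — types and names here (ToySpace, ToyPage) are illustrative, not V8's API:]

    #include <cassert>

    // Illustrative model: a space tracks bytes it still expects
    // lazy sweeping to recover.
    struct ToySpace {
      long unswept_free_bytes = 0;
      void IncrementUnsweptFreeBytes(long by) { unswept_free_bytes += by; }
    };

    struct ToyPage {
      ToySpace* owner;
      bool swept;
      long live_bytes = 0;

      // Mirrors the patch: growing live bytes on an unswept page means
      // the sweeper will free that much less, so shrink the estimate.
      void IncrementLiveBytesFromMutator(long by) {
        if (!swept) owner->IncrementUnsweptFreeBytes(-by);
        live_bytes += by;
      }
    };

    int main() {
      ToySpace space;
      ToyPage page{&space, /*swept=*/false};
      space.unswept_free_bytes = 4096;      // estimate from marking
      page.IncrementLiveBytesFromMutator(128);
      assert(space.unswept_free_bytes == 4096 - 128);
      return 0;
    }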
@@ -759,7 +770,8 @@
: Space(heap, id, executable),
free_list_(this),
was_swept_conservatively_(false),
- first_unswept_page_(Page::FromAddress(NULL)) {
+ first_unswept_page_(Page::FromAddress(NULL)),
+ unswept_free_bytes_(0) {
if (id == CODE_SPACE) {
area_size_ = heap->isolate()->memory_allocator()->
CodePageAreaSize();
@@ -777,12 +789,12 @@
}
-bool PagedSpace::Setup() {
+bool PagedSpace::SetUp() {
return true;
}
-bool PagedSpace::HasBeenSetup() {
+bool PagedSpace::HasBeenSetUp() {
return true;
}
@@ -872,6 +884,8 @@
intptr_t size = free_list_.EvictFreeListItems(page);
accounting_stats_.AllocateBytes(size);
ASSERT_EQ(AreaSize(), static_cast<int>(size));
+ } else {
+ DecreaseUnsweptFreeBytes(page);
}
if (Page::FromAllocationTop(allocation_info_.top) == page) {
@@ -981,9 +995,9 @@
// NewSpace implementation
-bool NewSpace::Setup(int reserved_semispace_capacity,
+bool NewSpace::SetUp(int reserved_semispace_capacity,
int maximum_semispace_capacity) {
- // Setup new space based on the preallocated memory block defined by
+ // Set up new space based on the preallocated memory block defined by
// start and size. The provided space is divided into two semi-spaces.
// To support fast containment testing in the new space, the size of
// this chunk must be a power of two and it must be aligned to its size.
@@ -1002,7 +1016,7 @@
ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
ASSERT(IsPowerOf2(maximum_semispace_capacity));
- // Allocate and setup the histogram arrays if necessary.
+ // Allocate and set up the histogram arrays if necessary.
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
@@ -1016,14 +1030,13 @@
2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
- if (!to_space_.Setup(chunk_base_,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
- return false;
- }
- if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
- initial_semispace_capacity,
- maximum_semispace_capacity)) {
+ to_space_.SetUp(chunk_base_,
+ initial_semispace_capacity,
+ maximum_semispace_capacity);
+ from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+ initial_semispace_capacity,
+ maximum_semispace_capacity);
+ if (!to_space_.Commit()) {
return false;
}
@@ -1256,7 +1269,7 @@
// -----------------------------------------------------------------------------
// SemiSpace implementation
-bool SemiSpace::Setup(Address start,
+void SemiSpace::SetUp(Address start,
int initial_capacity,
int maximum_capacity) {
// Creates a space in the young generation. The constructor does not
@@ -1275,8 +1288,6 @@
object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
-
- return Commit();
}
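[With this change SemiSpace::SetUp only records the layout and no longer commits memory; NewSpace::SetUp commits to-space eagerly while from-space stays uncommitted until it grows (see the GrowTo hunk below). A hedged sketch of that deferred-commit pattern, using an invented LazyRange class rather than V8's VirtualMemory machinery:]

    #include <cstddef>
    #include <cassert>

    // Invented stand-in for a reservation that is committed lazily.
    class LazyRange {
     public:
      void SetUp(char* start, size_t initial, size_t maximum) {
        start_ = start;
        capacity_ = initial;
        maximum_capacity_ = maximum;
        committed_ = false;            // SetUp no longer commits.
      }
      bool Commit() {                  // real code would commit pages here
        committed_ = true;
        return true;
      }
      bool GrowTo(size_t new_capacity) {
        if (!committed_ && !Commit()) return false;  // commit on demand
        if (new_capacity > maximum_capacity_) return false;
        capacity_ = new_capacity;
        return true;
      }
      bool is_committed() const { return committed_; }

     private:
      char* start_ = nullptr;
      size_t capacity_ = 0;
      size_t maximum_capacity_ = 0;
      bool committed_ = false;
    };

    int main() {
      static char reservation[1 << 20];
      LazyRange from_space;
      from_space.SetUp(reservation, 1 << 16, 1 << 20);
      assert(!from_space.is_committed());  // no memory cost until needed
      assert(from_space.GrowTo(1 << 17));  // first growth commits
      assert(from_space.is_committed());
      return 0;
    }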
@@ -1326,6 +1337,9 @@
bool SemiSpace::GrowTo(int new_capacity) {
+ if (!is_committed()) {
+ if (!Commit()) return false;
+ }
ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity <= maximum_capacity_);
ASSERT(new_capacity > capacity_);
@@ -1364,24 +1378,29 @@
ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
ASSERT(new_capacity >= initial_capacity_);
ASSERT(new_capacity < capacity_);
- // Semispaces grow backwards from the end of their allocated capacity,
- // so we find the before and after start addresses relative to the
- // end of the space.
- Address space_end = start_ + maximum_capacity_;
- Address old_start = space_end - capacity_;
- size_t delta = capacity_ - new_capacity;
- ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
- return false;
- }
- capacity_ = new_capacity;
+ if (is_committed()) {
+ // Semispaces grow backwards from the end of their allocated capacity,
+ // so we find the before and after start addresses relative to the
+ // end of the space.
+ Address space_end = start_ + maximum_capacity_;
+ Address old_start = space_end - capacity_;
+ size_t delta = capacity_ - new_capacity;
+ ASSERT(IsAligned(delta, OS::AllocateAlignment()));
- int pages_after = capacity_ / Page::kPageSize;
- NewSpacePage* new_last_page =
- NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
- new_last_page->set_next_page(anchor());
- anchor()->set_prev_page(new_last_page);
- ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+ if (!allocator->UncommitBlock(old_start, delta)) {
+ return false;
+ }
+
+ int pages_after = new_capacity / Page::kPageSize;
+ NewSpacePage* new_last_page =
+ NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
+ new_last_page->set_next_page(anchor());
+ anchor()->set_prev_page(new_last_page);
+ ASSERT((current_page_ <= first_page()) && (current_page_ >= new_last_page));
+ }
+
+ capacity_ = new_capacity;
return true;
}
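[ShrinkTo now skips the uncommit work entirely when the semispace was never committed, and since the capacity_ assignment moved to the end, the page arithmetic uses new_capacity directly. A worked example of that arithmetic under assumed numbers (4 KB pages, addresses invented for illustration):]

    #include <cassert>
    #include <cstdint>

    int main() {
      // Assumed: a semispace reserved at 0x10000 with 64 KB maximum
      // capacity, currently 32 KB committed, shrinking to 16 KB.
      const uintptr_t kPageSize = 0x1000;
      uintptr_t start = 0x10000, maximum_capacity = 0x10000;
      uintptr_t capacity = 0x8000, new_capacity = 0x4000;

      // Semispaces grow backwards from the end of the reservation,
      // so the region to uncommit sits at the old start.
      uintptr_t space_end = start + maximum_capacity;   // 0x20000
      uintptr_t old_start = space_end - capacity;       // 0x18000
      uintptr_t delta = capacity - new_capacity;        // 0x4000 uncommitted

      // The last remaining page is found from new_capacity.
      uintptr_t pages_after = new_capacity / kPageSize;              // 4
      uintptr_t new_last_page = space_end - pages_after * kPageSize; // 0x1C000

      assert(old_start == 0x18000 && delta == 0x4000);
      assert(new_last_page == 0x1C000);
      return 0;
    }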
@@ -1764,14 +1783,14 @@
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_unsafe(heap->raw_unchecked_free_space_map());
+ set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
this_as_free_space->set_size(size_in_bytes);
} else if (size_in_bytes == kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
@@ -2165,6 +2184,7 @@
} while (p != anchor());
}
first_unswept_page_ = Page::FromAddress(NULL);
+ unswept_free_bytes_ = 0;
// Clear the free list before a full GC---it will be rebuilt afterward.
free_list_.Reset();
@@ -2213,6 +2233,7 @@
PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
reinterpret_cast<intptr_t>(p));
}
+ DecreaseUnsweptFreeBytes(p);
freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
}
p = next_page;
@@ -2250,6 +2271,16 @@
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
+ // If there are unswept pages, advance the lazy sweeper, then sweep one
+ // page before allocating a new page.
+ if (first_unswept_page_->is_valid()) {
+ AdvanceSweeper(size_in_bytes);
+
+ // Retry the free list allocation.
+ HeapObject* object = free_list_.Allocate(size_in_bytes);
+ if (object != NULL) return object;
+ }
+
// Free list allocation failed and there is no next page. Fail if we have
// hit the old generation size limit that should cause a garbage
// collection.
@@ -2258,26 +2289,19 @@
return NULL;
}
- // If there are unswept pages advance lazy sweeper.
- if (first_unswept_page_->is_valid()) {
- AdvanceSweeper(size_in_bytes);
+ // Try to expand the space and allocate in the new next page.
+ if (Expand()) {
+ return free_list_.Allocate(size_in_bytes);
+ }
+
+ // As a last resort, sweep all the remaining pages to try to find space.
+ // This may cause a pause.
+ if (!IsSweepingComplete()) {
+ AdvanceSweeper(kMaxInt);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
if (object != NULL) return object;
-
- if (!IsSweepingComplete()) {
- AdvanceSweeper(kMaxInt);
-
- // Retry the free list allocation.
- object = free_list_.Allocate(size_in_bytes);
- if (object != NULL) return object;
- }
- }
-
- // Try to expand the space and allocate in the new next page.
- if (Expand()) {
- return free_list_.Allocate(size_in_bytes);
}
// Finally, fail.
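[The slow allocation path is reordered by this hunk: advance the lazy sweeper and retry the free list before checking the old-generation limit, prefer expanding the space over a stop-the-world sweep, and only sweep everything when expansion fails. A schematic of that control flow, with placeholder hooks standing in for V8's free list, sweeper, and expansion logic:]

    #include <cstdint>

    struct SlowPathModel {
      bool has_unswept_pages;
      bool hit_old_gen_limit;
      bool can_expand;
      bool sweeping_complete;

      // Stubs for illustration only; the real versions live in PagedSpace.
      void* TryFreeList(int) { return nullptr; }
      void AdvanceSweeper(intptr_t) {}
      bool Expand() { return can_expand; }

      // Mirrors the reordered PagedSpace::SlowAllocateRaw above.
      void* SlowAllocateRaw(int size_in_bytes) {
        if (has_unswept_pages) {
          AdvanceSweeper(size_in_bytes);           // sweep a little first
          if (void* obj = TryFreeList(size_in_bytes)) return obj;
        }
        if (hit_old_gen_limit) return nullptr;     // let a GC run instead
        if (Expand()) return TryFreeList(size_in_bytes);
        if (!sweeping_complete) {
          AdvanceSweeper(INTPTR_MAX);              // last resort: sweep all
          if (void* obj = TryFreeList(size_in_bytes)) return obj;
        }
        return nullptr;                            // finally, fail
      }
    };

    int main() {
      SlowPathModel m{true, false, true, false};
      (void)m.SlowAllocateRaw(64);
      return 0;
    }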
@@ -2508,7 +2532,7 @@
objects_size_(0) {}
-bool LargeObjectSpace::Setup() {
+bool LargeObjectSpace::SetUp() {
first_page_ = NULL;
size_ = 0;
page_count_ = 0;
@@ -2528,7 +2552,7 @@
space, kAllocationActionFree, page->size());
heap()->isolate()->memory_allocator()->Free(page);
}
- Setup();
+ SetUp();
}
@@ -2610,7 +2634,7 @@
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
previous = current;
current = current->next_page();
} else {