Upgrade to V8 3.4
Merge 3.4.14.35
Simple merge; only makefile updates were required.
Bug: 568872
Change-Id: I403a38452c547e06fcfa951c12eca12a1bc40978
diff --git a/src/spaces.cc b/src/spaces.cc
index b494d24..0f80496 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -402,7 +402,9 @@
size_t length,
Executability executable) {
#ifdef DEBUG
- ZapBlock(reinterpret_cast<Address>(mem), length);
+ // Do not try to zap the guard page.
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size);
#endif
if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
isolate_->code_range()->FreeRawMemory(mem, length);
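The DEBUG-only zap above has to skip the leading guard page of executable chunks: the guard page is mapped with no access rights, so writing the zap pattern into it would fault. A minimal sketch of the idea, with a hypothetical helper name and a plain memset standing in for V8's actual ZapBlock fill:

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative stand-in for the guard-aware zap: fill the block with a
// recognizable pattern, but never touch the inaccessible guard page at
// the front of an executable chunk.
static void ZapUsableRegion(uint8_t* base, size_t length,
                            bool executable, size_t page_size) {
  size_t guard = executable ? page_size : 0;   // guard page exists for code only
  memset(base + guard, 0xCC, length - guard);  // skip [base, base + guard)
}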
@@ -504,14 +506,28 @@
LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
*allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
+
// We may 'lose' a page due to alignment.
ASSERT(*allocated_pages >= kPagesPerChunk - 1);
- if (*allocated_pages == 0) {
- FreeRawMemory(chunk, chunk_size, owner->executable());
+
+ size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
+
+ // Check that we got at least one page that we can use.
+ if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
+ FreeRawMemory(chunk,
+ chunk_size,
+ owner->executable());
LOG(isolate_, DeleteEvent("PagedChunk", chunk));
return Page::FromAddress(NULL);
}
+ if (guard_size != 0) {
+ OS::Guard(chunk, guard_size);
+ chunk_size -= guard_size;
+ chunk = static_cast<Address>(chunk) + guard_size;
+ --*allocated_pages;
+ }
+
int chunk_id = Pop();
chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
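This block is the heart of the change: for executable chunks the first OS page becomes an inaccessible guard page, and the usable chunk is everything after it, one page smaller and one page fewer. On POSIX, OS::Guard presumably reduces to mprotect with PROT_NONE; here is a minimal sketch of the pointer and size bookkeeping (InstallLeadingGuard is a hypothetical name, not a V8 function):

#include <sys/mman.h>
#include <cstddef>

// Make the first guard_size bytes of a fresh chunk inaccessible and return
// the usable base. A stray read, write, or jump into the guard page now
// faults immediately instead of silently touching a neighboring mapping.
static void* InstallLeadingGuard(void* chunk, size_t* chunk_size,
                                 size_t guard_size) {
  if (mprotect(chunk, guard_size, PROT_NONE) != 0) return NULL;
  *chunk_size -= guard_size;
  return static_cast<char*>(chunk) + guard_size;
}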
@@ -681,7 +697,8 @@
LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
size_t size = c.size();
- FreeRawMemory(c.address(), size, c.executable());
+ size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
+ FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
PerformAllocationCallback(space, kAllocationActionFree, size);
}
c.init(NULL, 0, NULL);
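Freeing mirrors the allocation-side adjustment: the address recorded for a guarded chunk points past its guard page, so the raw mapping really starts one page lower and is one page larger, which is exactly the arithmetic in the hunk above. A sketch, assuming a munmap-style release of the original mapping:

#include <sys/mman.h>
#include <cstddef>

// Recover the raw mapping from a chunk whose base was advanced past its
// guard page, then release guard page and payload together.
static void FreeGuardedChunk(void* usable_base, size_t usable_size,
                             size_t guard_size) {
  void* raw_base = static_cast<char*>(usable_base) - guard_size;
  munmap(raw_base, usable_size + guard_size);
}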
@@ -868,30 +885,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void PagedSpace::Protect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->ProtectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-
-void PagedSpace::Unprotect() {
- Page* page = first_page_;
- while (page->is_valid()) {
- Isolate::Current()->memory_allocator()->UnprotectChunkFromPage(page);
- page = Isolate::Current()->memory_allocator()->
- FindLastPageInSameChunk(page)->next_page();
- }
-}
-
-#endif
-
-
void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
@@ -1196,7 +1189,6 @@
ASSERT(IsPowerOf2(maximum_semispace_capacity));
// Allocate and setup the histogram arrays if necessary.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
@@ -1204,7 +1196,6 @@
promoted_histogram_[name].set_name(#name);
INSTANCE_TYPE_LIST(SET_NAME)
#undef SET_NAME
-#endif
ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
ASSERT(IsAddressAligned(start, size, 0));
@@ -1236,7 +1227,6 @@
void NewSpace::TearDown() {
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
if (allocated_histogram_) {
DeleteArray(allocated_histogram_);
allocated_histogram_ = NULL;
@@ -1245,7 +1235,6 @@
DeleteArray(promoted_histogram_);
promoted_histogram_ = NULL;
}
-#endif
start_ = NULL;
allocation_info_.top = NULL;
@@ -1258,24 +1247,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void NewSpace::Protect() {
- heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
- heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
-}
-
-
-void NewSpace::Unprotect() {
- heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
- to_space_.executable());
- heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
- from_space_.executable());
-}
-
-#endif
-
-
void NewSpace::Flip() {
SemiSpace tmp = from_space_;
from_space_ = to_space_;
@@ -1638,7 +1609,6 @@
// Support for statistics gathering for --heap-stats and --log-gc.
-#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void NewSpace::ClearHistograms() {
for (int i = 0; i <= LAST_TYPE; i++) {
allocated_histogram_[i].clear();
@@ -1648,9 +1618,7 @@
// Because the copying collector does not touch garbage objects, we iterate
// the new space before a collection to get a histogram of allocated objects.
-// This only happens (1) when compiled with DEBUG and the --heap-stats flag is
-// set, or when compiled with ENABLE_LOGGING_AND_PROFILING and the --log-gc
-// flag is set.
+// This only happens when the --log-gc flag is set.
void NewSpace::CollectStatistics() {
ClearHistograms();
SemiSpaceIterator it(this);
@@ -1659,7 +1627,6 @@
}
-#ifdef ENABLE_LOGGING_AND_PROFILING
static void DoReportStatistics(Isolate* isolate,
HistogramInfo* info, const char* description) {
LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
@@ -1686,7 +1653,6 @@
}
LOG(isolate, HeapSampleEndEvent("NewSpace", description));
}
-#endif // ENABLE_LOGGING_AND_PROFILING
void NewSpace::ReportStatistics() {
@@ -1709,13 +1675,11 @@
}
#endif // DEBUG
-#ifdef ENABLE_LOGGING_AND_PROFILING
if (FLAG_log_gc) {
Isolate* isolate = ISOLATE;
DoReportStatistics(isolate, allocated_histogram_, "allocated");
DoReportStatistics(isolate, promoted_histogram_, "promoted");
}
-#endif // ENABLE_LOGGING_AND_PROFILING
}
@@ -1733,7 +1697,6 @@
promoted_histogram_[type].increment_number(1);
promoted_histogram_[type].increment_bytes(obj->Size());
}
-#endif // defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// -----------------------------------------------------------------------------
@@ -2726,9 +2689,10 @@
Executability executable) {
size_t requested = ChunkSizeFor(size_in_bytes);
size_t size;
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
Isolate* isolate = Isolate::Current();
void* mem = isolate->memory_allocator()->AllocateRawMemory(
- requested, &size, executable);
+ requested + guard_size, &size, executable);
if (mem == NULL) return NULL;
// The start of the chunk may be overlayed with a page so we have to
@@ -2736,13 +2700,19 @@
ASSERT((size & Page::kPageFlagMask) == 0);
LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
- if (size < requested) {
+ if (size < requested + guard_size) {
isolate->memory_allocator()->FreeRawMemory(
mem, size, executable);
LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
return NULL;
}
+ if (guard_size != 0) {
+ OS::Guard(mem, guard_size);
+ size -= guard_size;
+ mem = static_cast<Address>(mem) + guard_size;
+ }
+
ObjectSpace space = (executable == EXECUTABLE)
? kObjectSpaceCodeSpace
: kObjectSpaceLoSpace;
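LargeObjectChunk::New applies the same recipe at its own call site: over-allocate by one page, hand the memory back if the OS returned less than the request plus the guard, and only then carve the guard page off the front. A condensed sketch under the same assumptions as above (AllocateRawMemory and FreeRawMemory are assumed stand-ins for the real allocator calls, and mprotect for OS::Guard):

#include <sys/mman.h>
#include <cstddef>

void* AllocateRawMemory(size_t requested, size_t* allocated);  // assumed API
void FreeRawMemory(void* mem, size_t size);                    // assumed API

static void* AllocateGuardedLargeChunk(size_t requested, size_t guard_size,
                                       size_t* usable_size) {
  size_t size = 0;
  void* mem = AllocateRawMemory(requested + guard_size, &size);
  if (mem == NULL) return NULL;
  if (size < requested + guard_size) {  // short allocation: give it back whole
    FreeRawMemory(mem, size);
    return NULL;
  }
  mprotect(mem, guard_size, PROT_NONE);  // guard page, as OS::Guard would
  *usable_size = size - guard_size;
  return static_cast<char*>(mem) + guard_size;
}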
@@ -2796,9 +2766,11 @@
ObjectSpace space = kObjectSpaceLoSpace;
if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
size_t size = chunk->size();
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
- size,
- executable);
+ size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
+ heap()->isolate()->memory_allocator()->FreeRawMemory(
+ chunk->address() - guard_size,
+ size + guard_size,
+ executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size);
}
@@ -2809,31 +2781,6 @@
}
-#ifdef ENABLE_HEAP_PROTECTION
-
-void LargeObjectSpace::Protect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- heap()->isolate()->memory_allocator()->Protect(chunk->address(),
- chunk->size());
- chunk = chunk->next();
- }
-}
-
-
-void LargeObjectSpace::Unprotect() {
- LargeObjectChunk* chunk = first_chunk_;
- while (chunk != NULL) {
- bool is_code = chunk->GetObject()->IsCode();
- heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
- chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
- chunk = chunk->next();
- }
-}
-
-#endif
-
-
MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size,
int object_size,
Executability executable) {
@@ -3020,10 +2967,15 @@
objects_size_ -= object->Size();
page_count_--;
ObjectSpace space = kObjectSpaceLoSpace;
- if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
- heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
- chunk_size,
- executable);
+ size_t guard_size = 0;
+ if (executable == EXECUTABLE) {
+ space = kObjectSpaceCodeSpace;
+ guard_size = Page::kPageSize;
+ }
+ heap()->isolate()->memory_allocator()->FreeRawMemory(
+ chunk_address - guard_size,
+ chunk_size + guard_size,
+ executable);
heap()->isolate()->memory_allocator()->PerformAllocationCallback(
space, kAllocationActionFree, size_);
LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
@@ -3070,7 +3022,7 @@
// strings), fixed arrays, and byte arrays in large object space.
ASSERT(object->IsCode() || object->IsSeqString() ||
object->IsExternalString() || object->IsFixedArray() ||
- object->IsByteArray());
+ object->IsFixedDoubleArray() || object->IsByteArray());
// The object itself should look OK.
object->Verify();