Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE
This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile so that it no longer breaks that
build.
FPIIM-449
Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index 2fe099d..41345bc 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -448,236 +448,6 @@
}
-TEST(CompactionSpaceUsingExternalMemory) {
- const int kObjectSize = 512;
-
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- MemoryAllocator* allocator = new MemoryAllocator(isolate);
- CHECK(allocator != nullptr);
- CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
- TestMemoryAllocatorScope test_scope(isolate, allocator);
-
- CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
- CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
- CHECK(compaction_space != NULL);
- CHECK(compaction_space->SetUp());
-
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != NULL);
- CHECK(old_space->SetUp());
-
- // The linear allocation area already counts as used bytes, making
- // exact testing impossible.
- heap->DisableInlineAllocation();
-
- // Test:
- // * Allocate a backing store in old_space.
- // * Compute the number num_rest_objects of kObjectSize objects that fit
- //   into the rest of available memory.
- // * Add the rest of available memory to the compaction space.
- // * Allocate kNumRestObjects in the compaction space.
- // * Allocate one more object.
- // * Merge the compaction space and compare the expected number of pages.
-
- // Allocate a single object in old_space to initialize a backing page.
- old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- // Compute the number of objects that fit into the rest of old_space.
- intptr_t rest = static_cast<int>(old_space->Available());
- CHECK_GT(rest, 0);
- intptr_t num_rest_objects = rest / kObjectSize;
- // After allocating num_rest_objects in compaction_space we allocate a bit
- // more.
- const intptr_t kAdditionalCompactionMemory = kObjectSize;
- // We expect a single old_space page.
- const intptr_t kExpectedInitialOldSpacePages = 1;
- // We expect a single additional page in compaction space because we mostly
- // use external memory.
- const intptr_t kExpectedCompactionPages = 1;
- // We expect two pages to be reachable from old_space in the end.
- const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
-
- CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), 0);
- // Make the rest of memory available for compaction.
- old_space->DivideUponCompactionSpaces(&collection, 1, rest);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), rest);
- while (num_rest_objects-- > 0) {
- compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- }
- // We only used external memory so far.
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- // Additional allocation.
- compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
- .ToObjectChecked();
- // Now the compaction space should also have acquired a page.
- CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
-
- old_space->MergeCompactionSpace(compaction_space);
- CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
-
- delete collection;
- delete old_space;
-
- allocator->TearDown();
- delete allocator;
-}
-
-
-CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
- Heap* heap, int num_spaces) {
- CompactionSpaceCollection** spaces =
- new CompactionSpaceCollection*[num_spaces];
- for (int i = 0; i < num_spaces; i++) {
- spaces[i] = new CompactionSpaceCollection(heap);
- }
- return spaces;
-}
-
-
-void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
- int num_spaces) {
- for (int i = 0; i < num_spaces; i++) {
- delete spaces[i];
- }
- delete[] spaces;
-}
-
-
-void HeapTester::MergeCompactionSpaces(PagedSpace* space,
- CompactionSpaceCollection** spaces,
- int num_spaces) {
- AllocationSpace id = space->identity();
- for (int i = 0; i < num_spaces; i++) {
- space->MergeCompactionSpace(spaces[i]->Get(id));
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
- CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
- }
-}
-
-
-void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- int num_objects, int object_size) {
- for (int i = 0; i < num_spaces; i++) {
- for (int j = 0; j < num_objects; j++) {
- spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
- }
- spaces[i]->Get(id)->EmptyAllocationInfo();
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
- num_objects * object_size);
- CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
- spaces[i]->Get(id)->accounting_stats_.Size());
- }
-}
-
-
-void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- intptr_t* capacity, intptr_t* size) {
- *capacity = 0;
- *size = 0;
- for (int i = 0; i < num_spaces; i++) {
- *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
- *size += spaces[i]->Get(id)->accounting_stats_.Size();
- }
-}
-
-
-void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
- int object_size,
- int num_compaction_spaces,
- int additional_capacity_in_bytes) {
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != nullptr);
- CHECK(old_space->SetUp());
- old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
- old_space->EmptyAllocationInfo();
-
- intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
- old_space->accounting_stats_.Size();
- intptr_t capacity_for_compaction_space =
- rest_capacity / num_compaction_spaces;
- int num_objects_in_compaction_space =
- static_cast<int>(capacity_for_compaction_space) / object_size +
- num_additional_objects;
- CHECK_GT(num_objects_in_compaction_space, 0);
- intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();
-
- CompactionSpaceCollection** spaces =
- InitializeCompactionSpaces(heap, num_compaction_spaces);
- old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
- capacity_for_compaction_space);
-
- intptr_t compaction_capacity = 0;
- intptr_t compaction_size = 0;
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
-
- intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
- intptr_t old_space_size = old_space->accounting_stats_.Size();
- // Compaction space memory is subtracted from the original space's capacity.
- CHECK_EQ(old_space_capacity,
- initial_old_space_capacity - compaction_capacity);
- CHECK_EQ(compaction_size, 0);
-
- AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
- num_objects_in_compaction_space, object_size);
-
- // Old space size and capacity should be the same as after dividing.
- CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
- CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);
-
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
- MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);
-
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- old_space_capacity + compaction_capacity);
- CHECK_EQ(old_space->accounting_stats_.Size(),
- old_space_size + compaction_size);
- // We check against the expected end capacity.
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- initial_old_space_capacity + additional_capacity_in_bytes);
-
- DestroyCompactionSpaces(spaces, num_compaction_spaces);
- delete old_space;
-}
-
-
-HEAP_TEST(CompactionSpaceDivideSinglePage) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Since the bound for objects is tight and the dividing is best effort, we
- // subtract some objects to make sure we still fit in the initial page.
- // A CHECK makes sure that the overall number of allocated objects stays
- // > 0.
- const int kAdditionalObjects = -10;
- const int kAdditionalCapacityRequired = 0;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
-HEAP_TEST(CompactionSpaceDivideMultiplePages) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Allocate half a page of objects to ensure that we need one more page per
- // compaction space.
- const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
- const int kAdditionalCapacityRequired =
- Page::kAllocatableMemory * kCompactionSpaces;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
TEST(LargeObjectSpace) {
v8::V8::Initialize();
@@ -744,50 +514,6 @@
CHECK(isolate->heap()->lo_space()->IsEmpty());
}
-
-UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
- FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
- if (FLAG_optimize_for_size) return;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::New(isolate)->Enter();
-
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-
- NewSpace* new_space = i_isolate->heap()->new_space();
-
- // This test doesn't work if we start with a non-default new space
- // configuration.
- if (new_space->InitialTotalCapacity() == Page::kPageSize) {
- CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());
-
- // Fill up the first (and only) page of the semi space.
- FillCurrentPage(new_space);
-
- // Try to allocate out of the new space. A new page should be added and
- // the allocation should succeed.
- v8::internal::AllocationResult allocation =
- new_space->AllocateRawUnaligned(80);
- CHECK(!allocation.IsRetry());
- CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);
-
- // Turn the allocation into a proper object so isolate teardown won't
- // crash.
- HeapObject* free_space = NULL;
- CHECK(allocation.To(&free_space));
- new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
- }
- }
- isolate->Dispose();
-}
-
-
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRawUnaligned(size);
CHECK(!allocation.IsRetry());
@@ -797,10 +523,27 @@
return filler;
}
-class Observer : public InlineAllocationObserver {
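+// Overload for paged spaces: allocates raw memory with kDoubleUnaligned
+// alignment and turns it into a filler object so the heap stays iterable.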
+static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ space->heap()->CreateFillerObjectAt(filler->address(), size);
+ return filler;
+}
+
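+// Overload for the large object space: each allocation lives on its own
+// page, so no filler object needs to be installed afterwards.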
+static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ return filler;
+}
+
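+// Test observer that counts how many times Step() fires.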
+class Observer : public AllocationObserver {
public:
explicit Observer(intptr_t step_size)
- : InlineAllocationObserver(step_size), count_(0) {}
+ : AllocationObserver(step_size), count_(0) {}
void Step(int bytes_allocated, Address, size_t) override { count_++; }
@@ -810,8 +553,76 @@
int count_;
};
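+// Shared test body: exercises the AllocationObserver protocol against any
+// space type that exposes Add/RemoveAllocationObserver. Instantiated below
+// for NewSpace, PagedSpace, and LargeObjectSpace.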
+template <typename T>
+void testAllocationObserver(Isolate* i_isolate, T* space) {
+ Observer observer1(128);
+ space->AddAllocationObserver(&observer1);
-UNINITIALIZED_TEST(InlineAllocationObserver) {
+ // The observer should not get notified if we have allocated less than
+ // 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 0);
+
+ // The observer should get called when we have allocated exactly 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 1);
+
+ // Another >128 bytes should get another notification.
+ AllocateUnaligned(space, 136);
+ CHECK_EQ(observer1.count(), 2);
+
+ // Allocating a large object should get only one notification.
+ AllocateUnaligned(space, 1024);
+ CHECK_EQ(observer1.count(), 3);
+
+ // Allocating another 2048 bytes in small objects should get 16
+ // notifications.
+ for (int i = 0; i < 64; ++i) {
+ AllocateUnaligned(space, 32);
+ }
+ CHECK_EQ(observer1.count(), 19);
+
+ // Multiple observers should work.
+ Observer observer2(96);
+ space->AddAllocationObserver(&observer2);
+
+ AllocateUnaligned(space, 2048);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 1);
+
+ AllocateUnaligned(space, 104);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 2);
+
+ // Callback should stop getting called after an observer is removed.
+ space->RemoveAllocationObserver(&observer1);
+
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20); // no more notifications.
+ CHECK_EQ(observer2.count(), 3); // this one is still active.
+
+ // Ensure that PauseAllocationObserversScope works correctly.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 3);
+ {
+ PauseAllocationObserversScope pause_observers(i_isolate->heap());
+ CHECK_EQ(observer2.count(), 3);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer2.count(), 3);
+ }
+ CHECK_EQ(observer2.count(), 3);
+ // Coupled with the 48 bytes allocated before the pause, another 48 bytes
+ // allocated here should trigger a notification.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 4);
+
+ space->RemoveAllocationObserver(&observer2);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 4);
+}
+
+UNINITIALIZED_TEST(AllocationObserver) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -822,73 +633,13 @@
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- NewSpace* new_space = i_isolate->heap()->new_space();
-
- Observer observer1(128);
- new_space->AddInlineAllocationObserver(&observer1);
-
- // The observer should not get notified if we have only allocated less than
- // 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 0);
-
- // The observer should get called when we have allocated exactly 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 1);
-
- // Another >128 bytes should get another notification.
- AllocateUnaligned(new_space, 136);
- CHECK_EQ(observer1.count(), 2);
-
- // Allocating a large object should get only one notification.
- AllocateUnaligned(new_space, 1024);
- CHECK_EQ(observer1.count(), 3);
-
- // Allocating another 2048 bytes in small objects should get 16
- // notifications.
- for (int i = 0; i < 64; ++i) {
- AllocateUnaligned(new_space, 32);
- }
- CHECK_EQ(observer1.count(), 19);
-
- // Multiple observers should work.
- Observer observer2(96);
- new_space->AddInlineAllocationObserver(&observer2);
-
- AllocateUnaligned(new_space, 2048);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 1);
-
- AllocateUnaligned(new_space, 104);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 2);
-
- // Callback should stop getting called after an observer is removed.
- new_space->RemoveInlineAllocationObserver(&observer1);
-
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20); // no more notifications.
- CHECK_EQ(observer2.count(), 3); // this one is still active.
-
- // Ensure that PauseInlineAllocationObserversScope works correctly.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 3);
- {
- PauseInlineAllocationObserversScope pause_observers(new_space);
- CHECK_EQ(observer2.count(), 3);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer2.count(), 3);
- }
- CHECK_EQ(observer2.count(), 3);
- // Coupled with the 48 bytes allocated before the pause, another 48 bytes
- // allocated here should trigger a notification.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 4);
-
- new_space->RemoveInlineAllocationObserver(&observer2);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 4);
+ testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
+ // Old space is used here, but the code path is shared by all
+ // classes inheriting from PagedSpace.
+ testAllocationObserver<PagedSpace>(i_isolate,
+ i_isolate->heap()->old_space());
+ testAllocationObserver<LargeObjectSpace>(i_isolate,
+ i_isolate->heap()->lo_space());
}
isolate->Dispose();
}
@@ -908,16 +659,16 @@
NewSpace* new_space = i_isolate->heap()->new_space();
Observer observer1(512);
- new_space->AddInlineAllocationObserver(&observer1);
+ new_space->AddAllocationObserver(&observer1);
Observer observer2(576);
- new_space->AddInlineAllocationObserver(&observer2);
+ new_space->AddAllocationObserver(&observer2);
for (int i = 0; i < 512; ++i) {
AllocateUnaligned(new_space, 32);
}
- new_space->RemoveInlineAllocationObserver(&observer1);
- new_space->RemoveInlineAllocationObserver(&observer2);
+ new_space->RemoveAllocationObserver(&observer1);
+ new_space->RemoveAllocationObserver(&observer2);
CHECK_EQ(observer1.count(), 32);
CHECK_EQ(observer2.count(), 28);