Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE
This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e,
and updates the x64 makefile properly so it doesn't break that
build.
FPIIM-449
Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/test/cctest/heap/heap-tester.h b/test/cctest/heap/heap-tester.h
index 0a0860b..5d098f5 100644
--- a/test/cctest/heap/heap-tester.h
+++ b/test/cctest/heap/heap-tester.h
@@ -28,9 +28,9 @@
V(StressHandles) \
V(TestMemoryReducerSampleJsCalls) \
V(TestSizeOfObjects) \
+ V(Regress587004) \
V(WriteBarriersInCopyJSObject)
-
#define HEAP_TEST(Name) \
CcTest register_test_##Name(v8::internal::HeapTester::Test##Name, __FILE__, \
#Name, NULL, true, true); \
@@ -59,25 +59,6 @@
/* test-api.cc */
static void ResetWeakHandle(bool global_gc);
-
- /* test-spaces.cc */
- static CompactionSpaceCollection** InitializeCompactionSpaces(Heap* heap,
- int num_spaces);
- static void DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
- int num_spaces);
- static void MergeCompactionSpaces(PagedSpace* space,
- CompactionSpaceCollection** spaces,
- int num_spaces);
- static void AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- int num_objects, int object_size);
- static void CompactionStats(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- intptr_t* capacity, intptr_t* size);
- static void TestCompactionSpaceDivide(int num_additional_objects,
- int object_size,
- int num_compaction_spaces,
- int additional_capacity_in_bytes);
};
} // namespace internal
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 726887a..88aee8a 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -33,6 +33,7 @@
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/factory.h"
+#include "src/field-type.h"
#include "src/global-handles.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/memory-reducer.h"
@@ -1515,6 +1516,50 @@
CHECK(function->is_compiled() || !function->IsOptimized());
}
+TEST(TestUseOfIncrementalBarrierOnCompileLazy) {
+ // Turn off always_opt because it interferes with running the built-in for
+ // the last call to g().
+ i::FLAG_always_opt = false;
+ i::FLAG_allow_natives_syntax = true;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
+ Heap* heap = isolate->heap();
+ v8::HandleScope scope(CcTest::isolate());
+
+ CompileRun(
+ "function make_closure(x) {"
+ " return function() { return x + 3 };"
+ "}"
+ "var f = make_closure(5); f();"
+ "var g = make_closure(5);");
+
+ // Check f is compiled.
+ Handle<String> f_name = factory->InternalizeUtf8String("f");
+ Handle<Object> f_value =
+ Object::GetProperty(isolate->global_object(), f_name).ToHandleChecked();
+ Handle<JSFunction> f_function = Handle<JSFunction>::cast(f_value);
+ CHECK(f_function->is_compiled());
+
+ // Check g is not compiled.
+ Handle<String> g_name = factory->InternalizeUtf8String("g");
+ Handle<Object> g_value =
+ Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
+ Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
+  // TODO(mvstanton): change to check that g is *not* compiled when optimized
+  // cache map lookup moves to the compile lazy builtin.
+ CHECK(g_function->is_compiled());
+
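+  // Start incremental marking, then optimize f so an optimized code object
+  // exists for the shared function info while marking is in progress.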
+ SimulateIncrementalMarking(heap);
+ CompileRun("%OptimizeFunctionOnNextCall(f); f();");
+
+  // g should now have an optimized function available, still unmarked by the
+  // GC. The CompileLazy built-in will discover it and install it in the
+  // closure, and the incremental write barrier should be used.
+ CompileRun("g();");
+ CHECK(g_function->is_compiled());
+}
TEST(CompilationCacheCachingBehavior) {
// If we do not flush code, or have the compilation cache turned off, this
@@ -3514,6 +3559,13 @@
// The optimizer can allocate stuff, messing up the test.
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
+ // Parallel compaction increases fragmentation, depending on how existing
+ // memory is distributed. Since this is non-deterministic because of
+ // concurrent sweeping, we disable it for this test.
+ i::FLAG_parallel_compaction = false;
+  // Concurrent sweeping adds non-determinism, depending on when memory is
+  // available for further reuse.
+ i::FLAG_concurrent_sweeping = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -4163,9 +4215,6 @@
CHECK(shared1->code()->gc_metadata() != NULL);
// Optimize function and make sure the unoptimized code is replaced.
-#ifdef DEBUG
- FLAG_stop_at = "f";
-#endif
CompileRun("%OptimizeFunctionOnNextCall(g);"
"g(false);");
@@ -5555,8 +5604,8 @@
Handle<FixedArray> o1 = isolate->factory()->NewFixedArray(kFixedArrayLen);
Handle<FixedArray> o2 = isolate->factory()->NewFixedArray(kFixedArrayLen);
- CHECK(heap->InNewSpace(o1->address()));
- CHECK(heap->InNewSpace(o2->address()));
+ CHECK(heap->InNewSpace(*o1));
+ CHECK(heap->InNewSpace(*o2));
HeapIterator it(heap, i::HeapIterator::kFilterUnreachable);
@@ -5571,33 +5620,6 @@
}
-TEST(ArrayShiftSweeping) {
- i::FLAG_expose_gc = true;
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
-
- v8::Local<v8::Value> result = CompileRun(
- "var array = new Array(400);"
- "var tmp = new Array(1000);"
- "array[0] = 10;"
- "gc();"
- "gc();"
- "array.shift();"
- "array;");
-
- Handle<JSObject> o = Handle<JSObject>::cast(
- v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result)));
- CHECK(heap->InOldSpace(o->elements()));
- CHECK(heap->InOldSpace(*o));
- Page* page = Page::FromAddress(o->elements()->address());
- CHECK(page->parallel_sweeping_state().Value() <=
- MemoryChunk::kSweepingFinalize ||
- Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
-}
-
-
UNINITIALIZED_TEST(PromotionQueue) {
i::FLAG_expose_gc = true;
i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
@@ -5681,10 +5703,12 @@
Heap* heap = isolate->heap();
Handle<Map> map1 = Map::Create(isolate, 1);
+ Handle<String> name = factory->NewStringFromStaticChars("foo");
+ name = factory->InternalizeString(name);
Handle<Map> map2 =
- Map::CopyWithField(map1, factory->NewStringFromStaticChars("foo"),
- HeapType::Any(isolate), NONE, Representation::Tagged(),
- OMIT_TRANSITION).ToHandleChecked();
+ Map::CopyWithField(map1, name, FieldType::Any(isolate), NONE,
+ Representation::Tagged(), OMIT_TRANSITION)
+ .ToHandleChecked();
int desired_offset = Page::kPageSize - map1->instance_size();
@@ -6232,7 +6256,6 @@
const char* flag = "--turbo-filter=*";
FlagList::SetFlagsFromString(flag, StrLength(flag));
FLAG_always_opt = true;
- FLAG_turbo_try_finally = true;
CompileRun(test);
}
@@ -6470,6 +6493,43 @@
CheckDoubleEquals(2, calls_per_ms);
}
+HEAP_TEST(Regress587004) {
+ FLAG_concurrent_sweeping = false;
+#ifdef VERIFY_HEAP
+ FLAG_verify_heap = false;
+#endif
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ Heap* heap = CcTest::heap();
+ Isolate* isolate = CcTest::i_isolate();
+ Factory* factory = isolate->factory();
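+  // N is the largest number of pointer-size elements that still fits in a
+  // regular (non-large-object) FixedArray.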
+ const int N = (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) /
+ kPointerSize;
+ Handle<FixedArray> array = factory->NewFixedArray(N, TENURED);
+ CHECK(heap->old_space()->Contains(*array));
+ Handle<Object> number = factory->NewHeapNumber(1.0);
+ CHECK(heap->InNewSpace(*number));
+ for (int i = 0; i < N; i++) {
+ array->set(i, *number);
+ }
+ heap->CollectGarbage(OLD_SPACE);
+ SimulateFullSpace(heap->old_space());
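+  // Right-trim the array to a single element via the sweeper-concurrent
+  // path, then wait for sweeping to finish.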
+ heap->RightTrimFixedArray<Heap::CONCURRENT_TO_SWEEPER>(*array, N - 1);
+ heap->mark_compact_collector()->EnsureSweepingCompleted();
+ ByteArray* byte_array;
+ const int M = 256;
+ // Don't allow old space expansion. The test works without this flag too,
+ // but becomes very slow.
+ heap->set_force_oom(true);
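+  // Fill the remaining old-space holes, including the trimmed tail of the
+  // array, with byte arrays.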
+ while (heap->AllocateByteArray(M, TENURED).To(&byte_array)) {
+ for (int j = 0; j < M; j++) {
+ byte_array->set(j, 0x31);
+ }
+ }
+ // Re-enable old space expansion to avoid OOM crash.
+ heap->set_force_oom(false);
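+  // Trigger a scavenge; it has to process the old-to-new references
+  // recorded for the trimmed array.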
+ heap->CollectGarbage(NEW_SPACE);
+}
} // namespace internal
} // namespace v8
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index 2fe099d..41345bc 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -448,236 +448,6 @@
}
-TEST(CompactionSpaceUsingExternalMemory) {
- const int kObjectSize = 512;
-
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- MemoryAllocator* allocator = new MemoryAllocator(isolate);
- CHECK(allocator != nullptr);
- CHECK(allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
- TestMemoryAllocatorScope test_scope(isolate, allocator);
-
- CompactionSpaceCollection* collection = new CompactionSpaceCollection(heap);
- CompactionSpace* compaction_space = collection->Get(OLD_SPACE);
- CHECK(compaction_space != NULL);
- CHECK(compaction_space->SetUp());
-
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != NULL);
- CHECK(old_space->SetUp());
-
- // The linear allocation area already counts as used bytes, making
- // exact testing impossible.
- heap->DisableInlineAllocation();
-
- // Test:
- // * Allocate a backing store in old_space.
-  // * Compute the number num_rest_objects of kObjectSize objects that fit
-  //   into the rest of available memory.
- // * Add the rest of available memory to the compaction space.
- // * Allocate kNumRestObjects in the compaction space.
- // * Allocate one object more.
- // * Merge the compaction space and compare the expected number of pages.
-
- // Allocate a single object in old_space to initialize a backing page.
- old_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- // Compute the number of objects that fit into the rest in old_space.
- intptr_t rest = static_cast<int>(old_space->Available());
- CHECK_GT(rest, 0);
- intptr_t num_rest_objects = rest / kObjectSize;
- // After allocating num_rest_objects in compaction_space we allocate a bit
- // more.
- const intptr_t kAdditionalCompactionMemory = kObjectSize;
- // We expect a single old_space page.
- const intptr_t kExpectedInitialOldSpacePages = 1;
- // We expect a single additional page in compaction space because we mostly
- // use external memory.
- const intptr_t kExpectedCompactionPages = 1;
- // We expect two pages to be reachable from old_space in the end.
- const intptr_t kExpectedOldSpacePagesAfterMerge = 2;
-
- CHECK_EQ(old_space->CountTotalPages(), kExpectedInitialOldSpacePages);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), 0);
- // Make the rest of memory available for compaction.
- old_space->DivideUponCompactionSpaces(&collection, 1, rest);
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- CHECK_EQ(compaction_space->Capacity(), rest);
- while (num_rest_objects-- > 0) {
- compaction_space->AllocateRawUnaligned(kObjectSize).ToObjectChecked();
- }
- // We only used external memory so far.
- CHECK_EQ(compaction_space->CountTotalPages(), 0);
- // Additional allocation.
- compaction_space->AllocateRawUnaligned(kAdditionalCompactionMemory)
- .ToObjectChecked();
-  // Now the compaction space should've also acquired a page.
- CHECK_EQ(compaction_space->CountTotalPages(), kExpectedCompactionPages);
-
- old_space->MergeCompactionSpace(compaction_space);
- CHECK_EQ(old_space->CountTotalPages(), kExpectedOldSpacePagesAfterMerge);
-
- delete collection;
- delete old_space;
-
- allocator->TearDown();
- delete allocator;
-}
-
-
-CompactionSpaceCollection** HeapTester::InitializeCompactionSpaces(
- Heap* heap, int num_spaces) {
- CompactionSpaceCollection** spaces =
- new CompactionSpaceCollection*[num_spaces];
- for (int i = 0; i < num_spaces; i++) {
- spaces[i] = new CompactionSpaceCollection(heap);
- }
- return spaces;
-}
-
-
-void HeapTester::DestroyCompactionSpaces(CompactionSpaceCollection** spaces,
- int num_spaces) {
- for (int i = 0; i < num_spaces; i++) {
- delete spaces[i];
- }
- delete[] spaces;
-}
-
-
-void HeapTester::MergeCompactionSpaces(PagedSpace* space,
- CompactionSpaceCollection** spaces,
- int num_spaces) {
- AllocationSpace id = space->identity();
- for (int i = 0; i < num_spaces; i++) {
- space->MergeCompactionSpace(spaces[i]->Get(id));
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(), 0);
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Capacity(), 0);
- CHECK_EQ(spaces[i]->Get(id)->Waste(), 0);
- }
-}
-
-
-void HeapTester::AllocateInCompactionSpaces(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- int num_objects, int object_size) {
- for (int i = 0; i < num_spaces; i++) {
- for (int j = 0; j < num_objects; j++) {
- spaces[i]->Get(id)->AllocateRawUnaligned(object_size).ToObjectChecked();
- }
- spaces[i]->Get(id)->EmptyAllocationInfo();
- CHECK_EQ(spaces[i]->Get(id)->accounting_stats_.Size(),
- num_objects * object_size);
- CHECK_GE(spaces[i]->Get(id)->accounting_stats_.Capacity(),
- spaces[i]->Get(id)->accounting_stats_.Size());
- }
-}
-
-
-void HeapTester::CompactionStats(CompactionSpaceCollection** spaces,
- AllocationSpace id, int num_spaces,
- intptr_t* capacity, intptr_t* size) {
- *capacity = 0;
- *size = 0;
- for (int i = 0; i < num_spaces; i++) {
- *capacity += spaces[i]->Get(id)->accounting_stats_.Capacity();
- *size += spaces[i]->Get(id)->accounting_stats_.Size();
- }
-}
-
-
-void HeapTester::TestCompactionSpaceDivide(int num_additional_objects,
- int object_size,
- int num_compaction_spaces,
- int additional_capacity_in_bytes) {
- Isolate* isolate = CcTest::i_isolate();
- Heap* heap = isolate->heap();
- OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
- CHECK(old_space != nullptr);
- CHECK(old_space->SetUp());
- old_space->AllocateRawUnaligned(object_size).ToObjectChecked();
- old_space->EmptyAllocationInfo();
-
- intptr_t rest_capacity = old_space->accounting_stats_.Capacity() -
- old_space->accounting_stats_.Size();
- intptr_t capacity_for_compaction_space =
- rest_capacity / num_compaction_spaces;
- int num_objects_in_compaction_space =
- static_cast<int>(capacity_for_compaction_space) / object_size +
- num_additional_objects;
- CHECK_GT(num_objects_in_compaction_space, 0);
- intptr_t initial_old_space_capacity = old_space->accounting_stats_.Capacity();
-
- CompactionSpaceCollection** spaces =
- InitializeCompactionSpaces(heap, num_compaction_spaces);
- old_space->DivideUponCompactionSpaces(spaces, num_compaction_spaces,
- capacity_for_compaction_space);
-
- intptr_t compaction_capacity = 0;
- intptr_t compaction_size = 0;
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
-
- intptr_t old_space_capacity = old_space->accounting_stats_.Capacity();
- intptr_t old_space_size = old_space->accounting_stats_.Size();
- // Compaction space memory is subtracted from the original space's capacity.
- CHECK_EQ(old_space_capacity,
- initial_old_space_capacity - compaction_capacity);
- CHECK_EQ(compaction_size, 0);
-
- AllocateInCompactionSpaces(spaces, OLD_SPACE, num_compaction_spaces,
- num_objects_in_compaction_space, object_size);
-
- // Old space size and capacity should be the same as after dividing.
- CHECK_EQ(old_space->accounting_stats_.Size(), old_space_size);
- CHECK_EQ(old_space->accounting_stats_.Capacity(), old_space_capacity);
-
- CompactionStats(spaces, OLD_SPACE, num_compaction_spaces,
- &compaction_capacity, &compaction_size);
- MergeCompactionSpaces(old_space, spaces, num_compaction_spaces);
-
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- old_space_capacity + compaction_capacity);
- CHECK_EQ(old_space->accounting_stats_.Size(),
- old_space_size + compaction_size);
- // We check against the expected end capacity.
- CHECK_EQ(old_space->accounting_stats_.Capacity(),
- initial_old_space_capacity + additional_capacity_in_bytes);
-
- DestroyCompactionSpaces(spaces, num_compaction_spaces);
- delete old_space;
-}
-
-
-HEAP_TEST(CompactionSpaceDivideSinglePage) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Since the bound for objects is tight and the dividing is best effort, we
- // subtract some objects to make sure we still fit in the initial page.
- // A CHECK makes sure that the overall number of allocated objects stays
- // > 0.
- const int kAdditionalObjects = -10;
- const int kAdditionalCapacityRequired = 0;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
-HEAP_TEST(CompactionSpaceDivideMultiplePages) {
- const int kObjectSize = KB;
- const int kCompactionSpaces = 4;
- // Allocate half a page of objects to ensure that we need one more page per
- // compaction space.
- const int kAdditionalObjects = (Page::kPageSize / kObjectSize / 2);
- const int kAdditionalCapacityRequired =
- Page::kAllocatableMemory * kCompactionSpaces;
- TestCompactionSpaceDivide(kAdditionalObjects, kObjectSize, kCompactionSpaces,
- kAdditionalCapacityRequired);
-}
-
-
TEST(LargeObjectSpace) {
v8::V8::Initialize();
@@ -744,50 +514,6 @@
CHECK(isolate->heap()->lo_space()->IsEmpty());
}
-
-UNINITIALIZED_TEST(NewSpaceGrowsToTargetCapacity) {
- FLAG_target_semi_space_size = 2 * (Page::kPageSize / MB);
- if (FLAG_optimize_for_size) return;
-
- v8::Isolate::CreateParams create_params;
- create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
- v8::Isolate* isolate = v8::Isolate::New(create_params);
- {
- v8::Isolate::Scope isolate_scope(isolate);
- v8::HandleScope handle_scope(isolate);
- v8::Context::New(isolate)->Enter();
-
- Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
-
- NewSpace* new_space = i_isolate->heap()->new_space();
-
- // This test doesn't work if we start with a non-default new space
- // configuration.
- if (new_space->InitialTotalCapacity() == Page::kPageSize) {
- CHECK_EQ(new_space->CommittedMemory(), new_space->InitialTotalCapacity());
-
- // Fill up the first (and only) page of the semi space.
- FillCurrentPage(new_space);
-
-    // Try to allocate out of the new space. A new page should be added and
-    // the allocation should succeed.
- v8::internal::AllocationResult allocation =
- new_space->AllocateRawUnaligned(80);
- CHECK(!allocation.IsRetry());
- CHECK_EQ(new_space->CommittedMemory(), 2 * Page::kPageSize);
-
- // Turn the allocation into a proper object so isolate teardown won't
- // crash.
- HeapObject* free_space = NULL;
- CHECK(allocation.To(&free_space));
- new_space->heap()->CreateFillerObjectAt(free_space->address(), 80);
- }
- }
- isolate->Dispose();
-}
-
-
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
AllocationResult allocation = space->AllocateRawUnaligned(size);
CHECK(!allocation.IsRetry());
@@ -797,10 +523,27 @@
return filler;
}
-class Observer : public InlineAllocationObserver {
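+// Overloads of AllocateUnaligned so the observer test below can be
+// instantiated for paged and large object spaces as well.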
+static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ space->heap()->CreateFillerObjectAt(filler->address(), size);
+ return filler;
+}
+
+static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
+ AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
+ CHECK(!allocation.IsRetry());
+ HeapObject* filler = NULL;
+ CHECK(allocation.To(&filler));
+ return filler;
+}
+
+class Observer : public AllocationObserver {
public:
explicit Observer(intptr_t step_size)
- : InlineAllocationObserver(step_size), count_(0) {}
+ : AllocationObserver(step_size), count_(0) {}
void Step(int bytes_allocated, Address, size_t) override { count_++; }
@@ -810,8 +553,76 @@
int count_;
};
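+// Shared test body: the same observer checks run against any space type
+// (NewSpace, PagedSpace, LargeObjectSpace).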
+template <typename T>
+void testAllocationObserver(Isolate* i_isolate, T* space) {
+ Observer observer1(128);
+ space->AddAllocationObserver(&observer1);
-UNINITIALIZED_TEST(InlineAllocationObserver) {
+ // The observer should not get notified if we have only allocated less than
+ // 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 0);
+
+ // The observer should get called when we have allocated exactly 128 bytes.
+ AllocateUnaligned(space, 64);
+ CHECK_EQ(observer1.count(), 1);
+
+ // Another >128 bytes should get another notification.
+ AllocateUnaligned(space, 136);
+ CHECK_EQ(observer1.count(), 2);
+
+ // Allocating a large object should get only one notification.
+ AllocateUnaligned(space, 1024);
+ CHECK_EQ(observer1.count(), 3);
+
+ // Allocating another 2048 bytes in small objects should get 16
+ // notifications.
+ for (int i = 0; i < 64; ++i) {
+ AllocateUnaligned(space, 32);
+ }
+ CHECK_EQ(observer1.count(), 19);
+
+ // Multiple observers should work.
+ Observer observer2(96);
+ space->AddAllocationObserver(&observer2);
+
+ AllocateUnaligned(space, 2048);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 1);
+
+ AllocateUnaligned(space, 104);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 2);
+
+ // Callback should stop getting called after an observer is removed.
+ space->RemoveAllocationObserver(&observer1);
+
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20); // no more notifications.
+ CHECK_EQ(observer2.count(), 3); // this one is still active.
+
+  // Ensure that PauseAllocationObserversScope works correctly.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 3);
+ {
+ PauseAllocationObserversScope pause_observers(i_isolate->heap());
+ CHECK_EQ(observer2.count(), 3);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer2.count(), 3);
+ }
+ CHECK_EQ(observer2.count(), 3);
+ // Coupled with the 48 bytes allocated before the pause, another 48 bytes
+ // allocated here should trigger a notification.
+ AllocateUnaligned(space, 48);
+ CHECK_EQ(observer2.count(), 4);
+
+ space->RemoveAllocationObserver(&observer2);
+ AllocateUnaligned(space, 384);
+ CHECK_EQ(observer1.count(), 20);
+ CHECK_EQ(observer2.count(), 4);
+}
+
+UNINITIALIZED_TEST(AllocationObserver) {
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
@@ -822,73 +633,13 @@
Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
- NewSpace* new_space = i_isolate->heap()->new_space();
-
- Observer observer1(128);
- new_space->AddInlineAllocationObserver(&observer1);
-
- // The observer should not get notified if we have only allocated less than
- // 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 0);
-
- // The observer should get called when we have allocated exactly 128 bytes.
- AllocateUnaligned(new_space, 64);
- CHECK_EQ(observer1.count(), 1);
-
- // Another >128 bytes should get another notification.
- AllocateUnaligned(new_space, 136);
- CHECK_EQ(observer1.count(), 2);
-
- // Allocating a large object should get only one notification.
- AllocateUnaligned(new_space, 1024);
- CHECK_EQ(observer1.count(), 3);
-
- // Allocating another 2048 bytes in small objects should get 16
- // notifications.
- for (int i = 0; i < 64; ++i) {
- AllocateUnaligned(new_space, 32);
- }
- CHECK_EQ(observer1.count(), 19);
-
- // Multiple observers should work.
- Observer observer2(96);
- new_space->AddInlineAllocationObserver(&observer2);
-
- AllocateUnaligned(new_space, 2048);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 1);
-
- AllocateUnaligned(new_space, 104);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 2);
-
- // Callback should stop getting called after an observer is removed.
- new_space->RemoveInlineAllocationObserver(&observer1);
-
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20); // no more notifications.
- CHECK_EQ(observer2.count(), 3); // this one is still active.
-
-  // Ensure that PauseInlineAllocationObserversScope works correctly.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 3);
- {
- PauseInlineAllocationObserversScope pause_observers(new_space);
- CHECK_EQ(observer2.count(), 3);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer2.count(), 3);
- }
- CHECK_EQ(observer2.count(), 3);
- // Coupled with the 48 bytes allocated before the pause, another 48 bytes
- // allocated here should trigger a notification.
- AllocateUnaligned(new_space, 48);
- CHECK_EQ(observer2.count(), 4);
-
- new_space->RemoveInlineAllocationObserver(&observer2);
- AllocateUnaligned(new_space, 384);
- CHECK_EQ(observer1.count(), 20);
- CHECK_EQ(observer2.count(), 4);
+ testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
+ // Old space is used but the code path is shared for all
+ // classes inheriting from PagedSpace.
+ testAllocationObserver<PagedSpace>(i_isolate,
+ i_isolate->heap()->old_space());
+ testAllocationObserver<LargeObjectSpace>(i_isolate,
+ i_isolate->heap()->lo_space());
}
isolate->Dispose();
}
@@ -908,16 +659,16 @@
NewSpace* new_space = i_isolate->heap()->new_space();
Observer observer1(512);
- new_space->AddInlineAllocationObserver(&observer1);
+ new_space->AddAllocationObserver(&observer1);
Observer observer2(576);
- new_space->AddInlineAllocationObserver(&observer2);
+ new_space->AddAllocationObserver(&observer2);
for (int i = 0; i < 512; ++i) {
AllocateUnaligned(new_space, 32);
}
- new_space->RemoveInlineAllocationObserver(&observer1);
- new_space->RemoveInlineAllocationObserver(&observer2);
+ new_space->RemoveAllocationObserver(&observer1);
+ new_space->RemoveAllocationObserver(&observer2);
CHECK_EQ(observer1.count(), 32);
CHECK_EQ(observer2.count(), 28);