// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};


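// Allocates a chunk with the given reserve and commit sizes, checks that the
// reserved size accounts for the chunk header (plus guard pages in the
// executable case), and then verifies that CommitArea() can resize the
// committed area in place without moving its start.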
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                           ? MemoryAllocator::CodePageGuardStartOffset()
                           : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                          ? MemoryAllocator::CodePageGuardSize()
                          : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                         ? MemoryChunk::kAlignment
                         : base::OS::CommitPageSize();
  size_t reserved_size =
      ((executable == EXECUTABLE))
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() <
        memory_chunk->address() + memory_chunk->size());
  CHECK(memory_chunk->area_end() <=
        memory_chunk->address() + memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() <
        memory_chunk->address() + memory_chunk->size());
  CHECK(memory_chunk->area_end() <=
        memory_chunk->address() + memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


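// A tiny deterministic pseudo-random generator. The 20-bit mask keeps the
// result below 1 MB, so the commit sizes picked in TEST(MemoryChunk) never
// exceed the 1 MB reserve area used there.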
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), &faked_space, NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(first_page->is_valid());
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Allocate a second page; every page in the list should still be owned by
    // the faked space, and the page count should match.
    Page* other = memory_allocator->AllocatePage(faked_space.AreaSize(),
                                                 &faked_space, NOT_EXECUTABLE);
    CHECK(other->is_valid());
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(second_page->is_valid());

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->ReservedSemiSpaceSize(),
                        CcTest::heap()->ReservedSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // We cannot loop until Available() is exhausted since we initially have
  // 0 bytes available and would thus neither grow nor be able to allocate
  // an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    {
      AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

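// Helpers that allocate |size| raw bytes in the given space. For the new and
// paged spaces the raw memory is turned into a filler object so the heap
// remains iterable.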
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size);
  return filler;
}

static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size);
  return filler;
}

static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  return filler;
}

class Observer : public AllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

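// Exercises AllocationObserver notifications on a single space: an observer
// with step size N should be notified roughly once per N bytes allocated, and
// a single allocation triggers at most one notification.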
template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not be notified while less than 128 bytes have been
  // allocated.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // The callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // No more notifications.
  CHECK_EQ(observer2.count(), 3);   // This one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used here, but the code path is shared by all classes
    // inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}


UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8