// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->memory_allocator_) {
    isolate->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->code_range_) {
    isolate->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};


static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  TestCodeRangeScope test_code_range_scope(isolate, code_range);

  size_t header_size = (executable == EXECUTABLE)
                           ? MemoryAllocator::CodePageGuardStartOffset()
                           : MemoryChunk::kObjectStartOffset;
  size_t guard_size = (executable == EXECUTABLE)
                          ? MemoryAllocator::CodePageGuardSize()
                          : 0;

  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(reserve_area_size,
                                                              commit_area_size,
                                                              executable,
                                                              NULL);
  size_t alignment = code_range != NULL && code_range->valid()
                         ? MemoryChunk::kAlignment
                         : base::OS::CommitPageSize();
  size_t reserved_size =
      ((executable == EXECUTABLE))
          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                    alignment)
          : RoundUp(header_size + reserve_area_size,
                    base::OS::CommitPageSize());
  CHECK(memory_chunk->size() == reserved_size);
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                         memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                        memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

  Address area_start = memory_chunk->area_start();

  memory_chunk->CommitArea(second_commit_area_size);
  CHECK(area_start == memory_chunk->area_start());
  CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                         memory_chunk->size());
  CHECK(memory_chunk->area_end() <= memory_chunk->address() +
                                        memory_chunk->size());
  CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
        second_commit_area_size);

  memory_allocator->Free(memory_chunk);
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}


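// Produces a 20-bit pseudo-random value (always below 1 MB), which the
// MemoryChunk test below uses to pick commit area sizes that fit inside its
// 1 MB reserved area.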
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}


TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}


TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage<Page>(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(first_page->is_valid());
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Again, we should get n or n - 1 pages.
    Page* other = memory_allocator->AllocatePage<Page>(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);
    CHECK(other->is_valid());
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(second_page->is_valid());

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
                        CcTest::heap()->InitialSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(
      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
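  // kExpectedPages is the ceiling of kNumObjects / kNumObjectsPerPage, i.e.
  // the number of pages needed to hold all of the allocations below.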
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}


TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  return filler;
}

class Observer : public AllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not get notified if we have only allocated less than
  // 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
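  // (64 allocations of 32 bytes is 2048 bytes; with a 128-byte step size that
  // is 2048 / 128 = 16 additional steps, bringing the total count to 19.)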
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // Callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // no more notifications.
  CHECK_EQ(observer2.count(), 3);   // this one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used here, but the code path is shared by all
    // classes inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}


UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

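    // 512 allocations of 32 bytes is 16384 bytes in total, so observer1
    // (step 512) should have stepped 16384 / 512 = 32 times and observer2
    // (step 576) 16384 / 576 = 28 times (rounded down).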
    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8