// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2*Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // test region marking
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
    isolate->heap()->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->heap()->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
    isolate->heap()->memory_allocator()->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

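// Reserves a chunk with |reserve_area_size|, commits |commit_area_size|,
// checks the resulting chunk size and layout, then re-commits the area to
// |second_commit_area_size| and verifies it again before freeing the chunk.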
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  {
    TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
    TestCodeRangeScope test_code_range_scope(isolate, code_range);

    size_t header_size = (executable == EXECUTABLE)
                             ? MemoryAllocator::CodePageGuardStartOffset()
                             : MemoryChunk::kObjectStartOffset;
    size_t guard_size =
        (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;

    MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
        reserve_area_size, commit_area_size, executable, NULL);
    size_t alignment = code_range != NULL && code_range->valid()
                           ? MemoryChunk::kAlignment
                           : base::OS::CommitPageSize();
    size_t reserved_size =
        (executable == EXECUTABLE)
            ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                      alignment)
            : RoundUp(header_size + reserve_area_size,
                      base::OS::CommitPageSize());
    CHECK(memory_chunk->size() == reserved_size);
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

    Address area_start = memory_chunk->area_start();

    memory_chunk->CommitArea(second_commit_area_size);
    CHECK(area_start == memory_chunk->area_start());
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
          second_commit_area_size);

    memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}

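// Regression test: after the code range has been nearly exhausted by the
// first allocation, a second request that cannot be satisfied must fail by
// returning NULL rather than crashing.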
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  size_t code_range_size =
      kMinimumCodeRangeSize > 0 ? kMinimumCodeRangeSize : 3 * Page::kPageSize;
  if (!code_range->SetUp(code_range_size)) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - Page::kPageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK_NOT_NULL(address);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - Page::kPageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK_NULL(null_address);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}

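// Small deterministic pseudo-random generator; results stay below 1 MB
// (masked with 0xFFFFF) so they are usable as commit sizes within the
// 1 MB reserved area used by TEST(MemoryChunk).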
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}

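// Exercises chunk allocation with pseudo-random commit sizes, both with a
// valid CodeRange and with a CodeRange that was never set up, for executable
// as well as non-executable chunks.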
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without a valid CodeRange, i.e., omitting SetUp.
    code_range = new CodeRange(isolate);
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;
  }
}

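// Allocates pages for a locally constructed OldSpace ("faked_space") and
// checks that every allocated page ends up linked into the space's page list
// and owned by that space.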
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(Page::IsValid(first_page));
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Again, we should get n or n - 1 pages.
    Page* other = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);
    CHECK(Page::IsValid(other));
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(Page::IsValid(second_page));

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}

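// Fills the new space with maximally sized regular objects and checks that
// each allocated object is contained in the space.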
TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
                        CcTest::heap()->InitialSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}

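// Allocates maximally sized regular objects into a freshly set up OldSpace
// until no space is reported as available.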
TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}

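// Allocates a fixed number of objects into a CompactionSpace, checks the
// resulting page count, and then merges those pages into a regular OldSpace.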
TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}

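// Allocates page-sized objects in the large object space until allocation
// fails, verifying Contains() and FindObject() along the way and a final
// retry result once the space is exhausted.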
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    {
      AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  HandleScope scope(isolate);
  v8::Local<v8::Context> context = CcTest::isolate()->GetCurrentContext();
  // Skip this test on the custom snapshot builder.
  if (!CcTest::global()
           ->Get(context, v8_str("assertEquals"))
           .ToLocalChecked()
           ->IsUndefined()) {
    return;
  }

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

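// Helpers for the allocation observer tests below: allocate |size| raw bytes
// in the given space and, except in the large object space, write a filler
// object over the allocated memory so the heap stays iterable.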
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  return filler;
}

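// Allocation observer that simply counts how many times Step() is called.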
class Observer : public AllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

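// Shared observer scenario, run against every space type that has an
// AllocateUnaligned() helper above: a notification fires once an observer's
// step size worth of bytes has been allocated, observers can be added and
// removed independently, and PauseAllocationObserversScope suppresses
// notifications while bytes allocated before the pause still count.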
template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not get notified if we have only allocated less than
  // 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // Callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // no more notifications.
  CHECK_EQ(observer2.count(), 3);   // this one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used but the code path is shared for all
    // classes inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}


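// 512 allocations of 32 bytes add up to 16384 bytes, so an observer with a
// 512-byte step should fire 16384 / 512 = 32 times and one with a 576-byte
// step 16384 / 576 = 28 times (rounded down), matching the checks below.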
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8