// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdlib.h>

#include "src/base/platform/platform.h"
#include "src/snapshot/snapshot.h"
#include "src/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/utils-inl.h"

namespace v8 {
namespace internal {

#if 0
static void VerifyRegionMarking(Address page_start) {
#ifdef ENABLE_CARDMARKING_WRITE_BARRIER
  Page* p = Page::FromAddress(page_start);

  p->SetRegionMarks(Page::kAllRegionsCleanMarks);

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr));
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    Page::FromAddress(addr)->MarkRegionDirty(addr);
  }

  for (Address addr = p->ObjectAreaStart();
       addr < p->ObjectAreaEnd();
       addr += kPointerSize) {
    CHECK(Page::FromAddress(addr)->IsRegionDirty(addr));
  }
#endif
}
#endif


// TODO(gc) you can no longer allocate pages like this. Details are hidden.
#if 0
TEST(Page) {
  byte* mem = NewArray<byte>(2 * Page::kPageSize);
  CHECK(mem != NULL);

  Address start = reinterpret_cast<Address>(mem);
  Address page_start = RoundUp(start, Page::kPageSize);

  Page* p = Page::FromAddress(page_start);
  // Initialized Page has heap pointer, normally set by memory_allocator.
  p->heap_ = CcTest::heap();
  CHECK(p->address() == page_start);
  CHECK(p->is_valid());

  p->opaque_header = 0;
  p->SetIsLargeObjectPage(false);
  CHECK(!p->next_page()->is_valid());

  CHECK(p->ObjectAreaStart() == page_start + Page::kObjectStartOffset);
  CHECK(p->ObjectAreaEnd() == page_start + Page::kPageSize);

  CHECK(p->Offset(page_start + Page::kObjectStartOffset) ==
        Page::kObjectStartOffset);
  CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize);

  CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart());
  CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd());

  // Test region marking.
  VerifyRegionMarking(page_start);

  DeleteArray(mem);
}
#endif


// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
 public:
  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
      : isolate_(isolate),
        old_allocator_(isolate->heap()->memory_allocator()) {
    isolate->heap()->memory_allocator_ = allocator;
  }

  ~TestMemoryAllocatorScope() {
    isolate_->heap()->memory_allocator_ = old_allocator_;
  }

 private:
  Isolate* isolate_;
  MemoryAllocator* old_allocator_;

  DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};


// Temporarily sets a given code range in an isolate.
class TestCodeRangeScope {
 public:
  TestCodeRangeScope(Isolate* isolate, CodeRange* code_range)
      : isolate_(isolate),
        old_code_range_(isolate->heap()->memory_allocator()->code_range()) {
    isolate->heap()->memory_allocator()->code_range_ = code_range;
  }

  ~TestCodeRangeScope() {
    isolate_->heap()->memory_allocator()->code_range_ = old_code_range_;
  }

 private:
  Isolate* isolate_;
  CodeRange* old_code_range_;

  DISALLOW_COPY_AND_ASSIGN(TestCodeRangeScope);
};

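// Allocates a single chunk through a fresh MemoryAllocator and checks that the
// reserved size accounts for the header, guard pages (for executable chunks)
// and the requested reserve area; then re-commits the area to a second size
// and verifies that the committed area is resized in place.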
static void VerifyMemoryChunk(Isolate* isolate,
                              Heap* heap,
                              CodeRange* code_range,
                              size_t reserve_area_size,
                              size_t commit_area_size,
                              size_t second_commit_area_size,
                              Executability executable) {
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  {
    TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
    TestCodeRangeScope test_code_range_scope(isolate, code_range);

    size_t header_size = (executable == EXECUTABLE)
                             ? MemoryAllocator::CodePageGuardStartOffset()
                             : MemoryChunk::kObjectStartOffset;
    size_t guard_size =
        (executable == EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;

    MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
        reserve_area_size, commit_area_size, executable, NULL);
    size_t alignment = code_range != NULL && code_range->valid()
                           ? MemoryChunk::kAlignment
                           : base::OS::CommitPageSize();
    size_t reserved_size =
        (executable == EXECUTABLE)
            ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
                      alignment)
            : RoundUp(header_size + reserve_area_size,
                      base::OS::CommitPageSize());
    CHECK(memory_chunk->size() == reserved_size);
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

    Address area_start = memory_chunk->area_start();

    memory_chunk->CommitArea(second_commit_area_size);
    CHECK(area_start == memory_chunk->area_start());
    CHECK(memory_chunk->area_start() <
          memory_chunk->address() + memory_chunk->size());
    CHECK(memory_chunk->area_end() <=
          memory_chunk->address() + memory_chunk->size());
    CHECK(static_cast<size_t>(memory_chunk->area_size()) ==
          second_commit_area_size);

    memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}

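// Regression test: after most of a small CodeRange has already been handed
// out, a request that no longer fits must fail cleanly, i.e. AllocateRawMemory
// returns NULL instead of succeeding.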
TEST(Regress3540) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  const int pageSize = Page::kPageSize;
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
  CodeRange* code_range = new CodeRange(isolate);
  const size_t code_range_size = 4 * pageSize;
  if (!code_range->SetUp(
          code_range_size +
          RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
                  MemoryChunk::kAlignment) +
          v8::internal::MemoryAllocator::CodePageAreaSize())) {
    return;
  }

  Address address;
  size_t size;
  size_t request_size = code_range_size - 2 * pageSize;
  address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &size);
  CHECK(address != NULL);

  Address null_address;
  size_t null_size;
  request_size = code_range_size - pageSize;
  null_address = code_range->AllocateRawMemory(
      request_size, request_size - (2 * MemoryAllocator::CodePageGuardSize()),
      &null_size);
  CHECK(null_address == NULL);

  code_range->FreeRawMemory(address, size);
  delete code_range;
  memory_allocator->TearDown();
  delete memory_allocator;
}

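// Cheap deterministic pseudo-random generator. Results are masked to 20 bits
// (< 1 MB), so they always fit within the 1 MB reserve area used below.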
static unsigned int Pseudorandom() {
  static uint32_t lo = 2345;
  lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
  return lo & 0xFFFFF;
}

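// Runs VerifyMemoryChunk with 100 pseudo-random commit/re-commit sizes, for
// executable and non-executable chunks, both with and without a CodeRange.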
TEST(MemoryChunk) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  size_t reserve_area_size = 1 * MB;
  size_t initial_commit_area_size, second_commit_area_size;

  for (int i = 0; i < 100; i++) {
    initial_commit_area_size = Pseudorandom();
    second_commit_area_size = Pseudorandom();

    // With CodeRange.
    CodeRange* code_range = new CodeRange(isolate);
    const size_t code_range_size = 32 * MB;
    if (!code_range->SetUp(code_range_size)) return;

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
    delete code_range;

    // Without CodeRange.
    code_range = NULL;
    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      EXECUTABLE);

    VerifyMemoryChunk(isolate,
                      heap,
                      code_range,
                      reserve_area_size,
                      initial_commit_area_size,
                      second_commit_area_size,
                      NOT_EXECUTABLE);
  }
}

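// Allocates two pages for a faked old space, links them into the space's page
// list, and checks that iterating the list visits exactly the allocated pages
// with the correct owner.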
TEST(MemoryAllocator) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();

  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  {
    int total_pages = 0;
    OldSpace faked_space(heap, OLD_SPACE, NOT_EXECUTABLE);
    Page* first_page = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);

    first_page->InsertAfter(faked_space.anchor()->prev_page());
    CHECK(Page::IsValid(first_page));
    CHECK(first_page->next_page() == faked_space.anchor());
    total_pages++;

    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
    }

    // Again, we should get n or n - 1 pages.
    Page* other = memory_allocator->AllocatePage(
        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
        NOT_EXECUTABLE);
    CHECK(Page::IsValid(other));
    total_pages++;
    other->InsertAfter(first_page);
    int page_count = 0;
    for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) {
      CHECK(p->owner() == &faked_space);
      page_count++;
    }
    CHECK(total_pages == page_count);

    Page* second_page = first_page->next_page();
    CHECK(Page::IsValid(second_page));

    // OldSpace's destructor will tear down the space and free up all pages.
  }
  memory_allocator->TearDown();
  delete memory_allocator;
}

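// Sets up a NewSpace and fills it with maximum-size regular objects until the
// space reports no room left; every allocation must stay inside the space.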
TEST(NewSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  NewSpace new_space(heap);

  CHECK(new_space.SetUp(CcTest::heap()->InitialSemiSpaceSize(),
                        CcTest::heap()->InitialSemiSpaceSize()));
  CHECK(new_space.HasBeenSetUp());

  while (new_space.Available() >= Page::kMaxRegularHeapObjectSize) {
    Object* obj =
        new_space.AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
            .ToObjectChecked();
    CHECK(new_space.Contains(HeapObject::cast(obj)));
  }

  new_space.TearDown();
  memory_allocator->TearDown();
  delete memory_allocator;
}

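// Sets up an OldSpace and allocates from it until Available() drops to zero.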
TEST(OldSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  OldSpace* s = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(s != NULL);

  CHECK(s->SetUp());

  while (s->Available() > 0) {
    s->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize).ToObjectChecked();
  }

  delete s;
  memory_allocator->TearDown();
  delete memory_allocator;
}

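// A CompactionSpace allocates its own pages. Filling it with kNumObjects
// maximum-size objects should create kExpectedPages pages, which are then
// handed over to the old space via MergeCompactionSpace.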
TEST(CompactionSpace) {
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
  CHECK(memory_allocator != nullptr);
  CHECK(memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize(),
                                0));
  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);

  CompactionSpace* compaction_space =
      new CompactionSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(compaction_space != NULL);
  CHECK(compaction_space->SetUp());

  OldSpace* old_space = new OldSpace(heap, OLD_SPACE, NOT_EXECUTABLE);
  CHECK(old_space != NULL);
  CHECK(old_space->SetUp());

  // Cannot loop until "Available()" since we initially have 0 bytes available
  // and would thus neither grow, nor be able to allocate an object.
  const int kNumObjects = 100;
  const int kNumObjectsPerPage =
      compaction_space->AreaSize() / Page::kMaxRegularHeapObjectSize;
  const int kExpectedPages =
      (kNumObjects + kNumObjectsPerPage - 1) / kNumObjectsPerPage;
  for (int i = 0; i < kNumObjects; i++) {
    compaction_space->AllocateRawUnaligned(Page::kMaxRegularHeapObjectSize)
        .ToObjectChecked();
  }
  int pages_in_old_space = old_space->CountTotalPages();
  int pages_in_compaction_space = compaction_space->CountTotalPages();
  CHECK_EQ(pages_in_compaction_space, kExpectedPages);
  CHECK_LE(pages_in_old_space, 1);

  old_space->MergeCompactionSpace(compaction_space);
  CHECK_EQ(old_space->CountTotalPages(),
           pages_in_old_space + pages_in_compaction_space);

  delete compaction_space;
  delete old_space;

  memory_allocator->TearDown();
  delete memory_allocator;
}

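// Allocates page-sized large objects until the large object space refuses
// further allocations. Checks basic lookup via Contains()/FindObject() and
// that Available() keeps shrinking while the space fills up.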
TEST(LargeObjectSpace) {
  v8::V8::Initialize();

  LargeObjectSpace* lo = CcTest::heap()->lo_space();
  CHECK(lo != NULL);

  int lo_size = Page::kPageSize;

  Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE).ToObjectChecked();
  CHECK(obj->IsHeapObject());

  HeapObject* ho = HeapObject::cast(obj);

  CHECK(lo->Contains(HeapObject::cast(obj)));

  CHECK(lo->FindObject(ho->address()) == obj);

  CHECK(lo->Contains(ho));

  while (true) {
    intptr_t available = lo->Available();
    { AllocationResult allocation = lo->AllocateRaw(lo_size, NOT_EXECUTABLE);
      if (allocation.IsRetry()) break;
    }
    // The available value is conservative such that it may report
    // zero prior to heap exhaustion.
    CHECK(lo->Available() < available || available == 0);
  }

  CHECK(!lo->IsEmpty());

  CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE).IsRetry());
}


TEST(SizeOfFirstPageIsLargeEnough) {
  if (i::FLAG_always_opt) return;
  // Bootstrapping without a snapshot causes more allocations.
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  if (!isolate->snapshot_available()) return;
  if (Snapshot::EmbedsScript(isolate)) return;

  // If this test fails due to enabling experimental natives that are not part
  // of the snapshot, we may need to adjust CalculateFirstPageSizes.

  // Freshly initialized VM gets by with one page per space.
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // Executing the empty script gets by with one page per space.
  HandleScope scope(isolate);
  CompileRun("/*empty*/");
  for (int i = FIRST_PAGED_SPACE; i <= LAST_PAGED_SPACE; i++) {
    // Debug code can be very large, so skip CODE_SPACE if we are generating it.
    if (i == CODE_SPACE && i::FLAG_debug_code) continue;
    CHECK_EQ(1, isolate->heap()->paged_space(i)->CountTotalPages());
  }

  // No large objects required to perform the above steps.
  CHECK(isolate->heap()->lo_space()->IsEmpty());
}

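// Helpers for the allocation observer tests below: allocate raw memory in the
// given space and (for new and paged space) turn it into a filler object so
// the heap stays iterable.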
static HeapObject* AllocateUnaligned(NewSpace* space, int size) {
  AllocationResult allocation = space->AllocateRawUnaligned(size);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(PagedSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, kDoubleUnaligned);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  space->heap()->CreateFillerObjectAt(filler->address(), size,
                                      ClearRecordedSlots::kNo);
  return filler;
}

static HeapObject* AllocateUnaligned(LargeObjectSpace* space, int size) {
  AllocationResult allocation = space->AllocateRaw(size, EXECUTABLE);
  CHECK(!allocation.IsRetry());
  HeapObject* filler = NULL;
  CHECK(allocation.To(&filler));
  return filler;
}

class Observer : public AllocationObserver {
 public:
  explicit Observer(intptr_t step_size)
      : AllocationObserver(step_size), count_(0) {}

  void Step(int bytes_allocated, Address, size_t) override { count_++; }

  int count() const { return count_; }

 private:
  int count_;
};

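// Exercises the AllocationObserver protocol for any space type T that provides
// AddAllocationObserver/RemoveAllocationObserver and has a matching
// AllocateUnaligned helper above: an observer fires once per step_size bytes
// allocated, multiple observers can coexist, and notifications stop after
// removal or while observers are paused.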
template <typename T>
void testAllocationObserver(Isolate* i_isolate, T* space) {
  Observer observer1(128);
  space->AddAllocationObserver(&observer1);

  // The observer should not get notified if we have only allocated less than
  // 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 0);

  // The observer should get called when we have allocated exactly 128 bytes.
  AllocateUnaligned(space, 64);
  CHECK_EQ(observer1.count(), 1);

  // Another >128 bytes should get another notification.
  AllocateUnaligned(space, 136);
  CHECK_EQ(observer1.count(), 2);

  // Allocating a large object should get only one notification.
  AllocateUnaligned(space, 1024);
  CHECK_EQ(observer1.count(), 3);

  // Allocating another 2048 bytes in small objects should get 16
  // notifications.
  for (int i = 0; i < 64; ++i) {
    AllocateUnaligned(space, 32);
  }
  CHECK_EQ(observer1.count(), 19);

  // Multiple observers should work.
  Observer observer2(96);
  space->AddAllocationObserver(&observer2);

  AllocateUnaligned(space, 2048);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 1);

  AllocateUnaligned(space, 104);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 2);

  // Callback should stop getting called after an observer is removed.
  space->RemoveAllocationObserver(&observer1);

  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);  // No more notifications.
  CHECK_EQ(observer2.count(), 3);   // This one is still active.

  // Ensure that PauseAllocationObserversScope works correctly.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 3);
  {
    PauseAllocationObserversScope pause_observers(i_isolate->heap());
    CHECK_EQ(observer2.count(), 3);
    AllocateUnaligned(space, 384);
    CHECK_EQ(observer2.count(), 3);
  }
  CHECK_EQ(observer2.count(), 3);
  // Coupled with the 48 bytes allocated before the pause, another 48 bytes
  // allocated here should trigger a notification.
  AllocateUnaligned(space, 48);
  CHECK_EQ(observer2.count(), 4);

  space->RemoveAllocationObserver(&observer2);
  AllocateUnaligned(space, 384);
  CHECK_EQ(observer1.count(), 20);
  CHECK_EQ(observer2.count(), 4);
}

UNINITIALIZED_TEST(AllocationObserver) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    testAllocationObserver<NewSpace>(i_isolate, i_isolate->heap()->new_space());
    // Old space is used but the code path is shared for all
    // classes inheriting from PagedSpace.
    testAllocationObserver<PagedSpace>(i_isolate,
                                       i_isolate->heap()->old_space());
    testAllocationObserver<LargeObjectSpace>(i_isolate,
                                             i_isolate->heap()->lo_space());
  }
  isolate->Dispose();
}

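// 512 allocations of 32 bytes make 16384 bytes in total. With a step size of
// 512 bytes observer1 is expected to fire 16384 / 512 = 32 times; with a step
// size of 576 bytes observer2 fires floor(16384 / 576) = 28 times, matching
// the checks below.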
UNINITIALIZED_TEST(InlineAllocationObserverCadence) {
  v8::Isolate::CreateParams create_params;
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  v8::Isolate* isolate = v8::Isolate::New(create_params);
  {
    v8::Isolate::Scope isolate_scope(isolate);
    v8::HandleScope handle_scope(isolate);
    v8::Context::New(isolate)->Enter();

    Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);

    NewSpace* new_space = i_isolate->heap()->new_space();

    Observer observer1(512);
    new_space->AddAllocationObserver(&observer1);
    Observer observer2(576);
    new_space->AddAllocationObserver(&observer2);

    for (int i = 0; i < 512; ++i) {
      AllocateUnaligned(new_space, 32);
    }

    new_space->RemoveAllocationObserver(&observer1);
    new_space->RemoveAllocationObserver(&observer2);

    CHECK_EQ(observer1.count(), 32);
    CHECK_EQ(observer2.count(), 28);
  }
  isolate->Dispose();
}

}  // namespace internal
}  // namespace v8