// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

void Bitmap::SetAllBits(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++)
    bitmap->cells()[i] = 0xffffffff;
}

// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator
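
// Walks the objects allocated in the semispace, advancing to the next page
// when the current address hits a page limit and skipping filler objects.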
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (NewSpacePage::IsAtEnd(current_)) {
      NewSpacePage* page = NewSpacePage::FromLimit(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}


HeapObject* SemiSpaceIterator::next_object() { return Next(); }


// -----------------------------------------------------------------------------
// NewSpacePageIterator

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != NULL) return next_obj;
  } while (AdvanceToNextPage());
  return NULL;
}


HeapObject* HeapObjectIterator::next_object() { return Next(); }
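
// Scans the current page for the next non-filler object, skipping the unused
// gap between the allocation top and limit of the owning space.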
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return NULL;
}

// -----------------------------------------------------------------------------
// LargePageIterator

LargePageIterator::LargePageIterator(LargeObjectSpace* space)
    : next_page_(space->first_page()) {}

LargePage* LargePageIterator::next() {
  LargePage* result = next_page_;
  if (next_page_ != nullptr) {
    next_page_ = next_page_->next_page();
  }
  return result;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    if (it.next() == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

// --------------------------------------------------------------------------
// AllocationResult
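
// For a retry result the target allocation space is encoded as a Smi in the
// object_ field.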
AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}
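
// Turns a freshly allocated memory chunk into a new-space page and tags it
// with the from-/to-space flag of the owning semispace.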
NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                       Executability executable,
                                       SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace
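
// Turns a memory chunk into a page of the owning paged space and releases the
// whole usable area to the space's free list.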
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  page->mutex_ = new base::Mutex();
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  owner->Free(page->area_start(), page->area_size());

  return page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
                 live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (IsFlagSet(BLACK_PAGE)) return;
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(),
                 "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
                 live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}
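
// For pages in paged spaces that have not been swept yet, the mutator's
// allocation also has to be accounted to the owning space.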
void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!p->is_valid()) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  return added;
}
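
// Maps an arbitrary address to its memory chunk; if the address does not point
// behind the header of a chunk that carries a page header, the page is looked
// up in the large object space instead.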
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPage(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
    : state_(kOldSpaceState),
      mode_(mode),
      old_iterator_(heap->old_space()),
      code_iterator_(heap->code_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}
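
// State machine that returns the chunks of the old, map, code, and large
// object spaces in turn; the switch cases intentionally fall through.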
MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_.has_next()) {
        return old_iterator_.next();
      }
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
        return map_iterator_.next();
      }
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
        return code_iterator_.next();
      }
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      MemoryChunk* answer = lo_iterator_.next();
      if (answer != nullptr) {
        return answer;
      }
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}

void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}

Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
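// Tries the linear allocation area first, then the free list, and finally the
// slow path (SlowAllocateRaw) before giving up with a retry result.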
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}


// -----------------------------------------------------------------------------
// NewSpace
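
// Allocates from the new-space allocation area, prepending a filler object
// when needed to satisfy the requested alignment; EnsureAllocation makes room
// if the remaining area is too small.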
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}


LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}
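
// Wraps a successful allocation of the given size in a local allocation
// buffer; a retry result yields the invalid buffer.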
LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}
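
// Merges the other buffer into this one if the other buffer's limit coincides
// with this buffer's top, absorbing the other buffer's unused space.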
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_