// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

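// PageIteratorImpl backs the page iterators returned by the spaces'
// begin()/end() page ranges, so pages can be walked with a range-based for
// loop, e.g. `for (Page* p : *this)` as in SemiSpace::ContainsSlow() below.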
template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
    : range_(Page::FromAddress(start),
             Page::FromAllocationAreaAddress(limit)->next_page()) {
  SemiSpace::AssertValidRange(start, limit);
}

// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

void Bitmap::SetAllBits(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++)
    bitmap->cells()[i] = 0xffffffff;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator

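// Returns the next live object in the semispace, skipping fillers and hopping
// from the end of one page's object area to the start of the next, or nullptr
// once the allocation limit is reached.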
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != nullptr) return next_obj;
  } while (AdvanceToNextPage());
  return nullptr;
}

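// Scans the current page for the next non-filler object, skipping the unused
// gap of the linear allocation area (between the space's top and limit) and
// sanity-checking object sizes along the way.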
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

// --------------------------------------------------------------------------
// AllocationResult

AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}

Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  page->AllocateLocalTracker();
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace

template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  // If we do not free the memory, we effectively account for the whole page
  // as allocated memory that cannot be used for further allocations.
  if (mode == kFreeMemory) {
    owner->Free(page->area_start(), page->area_size());
  }

  return page;
}

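// Turns a new-space page into an old-space page in place: the page keeps its
// contents, ownership and flags move to {new_owner}, and the page is
// re-initialized with kDoNotFreeMemory so its area is accounted as allocated.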
Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
  DCHECK(old_page->InNewSpace());
  old_page->set_owner(new_owner);
  old_page->SetFlags(0, ~0);
  new_owner->AccountCommitted(old_page->size());
  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
  new_page->InsertAfter(new_owner->anchor()->prev_page());
  return new_page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
                 static_cast<void*>(this), live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (IsFlagSet(BLACK_PAGE)) return;
  if (FLAG_trace_live_bytes) {
    PrintIsolate(
        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  return added;
}

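// Returns the MemoryChunk containing {addr}. Pointers into regular pages
// resolve by masking the address; if the masked location does not carry a
// page header (as for interior pointers into large objects), the large
// object space is searched instead.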
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPage(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(old_to_old_slots_);
    DCHECK_NULL(typed_old_to_old_slots_);
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()) {}

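// Returns the next chunk over all spaces, walking old space, map space, code
// space and finally the large object space in turn; nullptr once every space
// has been exhausted.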
MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}
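
// A minimal caller-side sketch (illustrative only) of how this fast path is
// layered with the slow paths, mirroring AllocateRawUnaligned() below:
//
//   HeapObject* object = AllocateLinearly(size_in_bytes);  // bump pointer
//   if (object == NULL) object = free_list_.Allocate(size_in_bytes);
//   if (object == NULL) object = SlowAllocateRaw(size_in_bytes);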


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}

// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}

// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


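// On 32-bit hosts, kDoubleAligned requests take the aligned path; everywhere
// else the unaligned fast path suffices. On success, allocation observers are
// notified via AllocationStep().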
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}


// -----------------------------------------------------------------------------
// NewSpace


AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

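// Executable large pages are capped at kMaxCodePageSize so that slot offsets
// recorded for them still fit into TypedSlotSet::kMaxOffset (see the
// STATIC_ASSERT below); an oversized code page is a fatal error.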
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->memory_allocator()->Available());
}


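// A LocalAllocationBuffer bump-allocates out of a [top, limit) range reserved
// up front (see FromResult() below); InvalidBuffer() is the empty sentinel
// returned when that reservation failed.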
LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


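// Merging only succeeds when {other}'s buffer ends exactly where this
// buffer's unallocated region begins; the unused tail of {other} is then
// absorbed into this buffer and {other}'s allocation area is reset.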
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_