// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

void Bitmap::SetAllBits(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++)
    bitmap->cells()[i] = 0xffffffff;
}

// -----------------------------------------------------------------------------
// PageIterator

PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator

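// Next() walks the objects of a semispace linearly. When the cursor reaches a
// page boundary it continues at the area_start() of the following page;
// filler objects are skipped, and nullptr is returned once the limit is hit.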
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}


HeapObject* SemiSpaceIterator::next_object() { return Next(); }


// -----------------------------------------------------------------------------
// NewSpacePageIterator

NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(Page::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(Page::FromAddress(space->ToSpaceStart())),
      last_page_(Page::FromAllocationAreaAddress(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(Page::FromAddress(start)->prev_page()),
      next_page_(Page::FromAddress(start)),
      last_page_(Page::FromAllocationAreaAddress(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }

Page* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != NULL) return next_obj;
  } while (AdvanceToNextPage());
  return NULL;
}


HeapObject* HeapObjectIterator::next_object() { return Next(); }

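// FromCurrentPage() scans the objects of the current page. The unused part of
// the linear allocation area (between the space's top() and limit()) holds no
// initialized objects, so the cursor jumps over it; fillers are skipped.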
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    // TODO(hpayer): Remove the debugging code.
    if (cur_addr_ > cur_end_) {
      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
                                                      obj_size);
    }

    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return NULL;
}

// -----------------------------------------------------------------------------
// LargePageIterator

LargePageIterator::LargePageIterator(LargeObjectSpace* space)
    : next_page_(space->first_page()) {}

LargePage* LargePageIterator::next() {
  LargePage* result = next_page_;
  if (next_page_ != nullptr) {
    next_page_ = next_page_->next_page();
  }
  return result;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  NewSpacePageIterator it(this);
  while (it.has_next()) {
    if (it.next() == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

// --------------------------------------------------------------------------
// AllocationResult

AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}

Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace

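// The templated Page::Initialize() below turns a freshly committed
// MemoryChunk into a page of a paged space. With mode == kFreeMemory the
// usable area is handed to the owner's free list; with kDoNotFreeMemory the
// area stays accounted as allocated (used when an already populated page is
// adopted, see ConvertNewToOld() below).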
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  page->mutex_ = new base::Mutex();
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  // If the memory is not freed here, the whole page is effectively accounted
  // as allocated memory that cannot be used for further allocations.
  if (mode == kFreeMemory) {
    owner->Free(page->area_start(), page->area_size());
  }

  return page;
}

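// ConvertNewToOld() re-tags a new space page as an old space page: ownership
// and flags are transferred, the committed memory is accounted to the new
// owner, and the page is initialized with kDoNotFreeMemory because its area
// still contains the objects that are being promoted.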
Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
  DCHECK(old_page->InNewSpace());
  old_page->set_owner(new_owner);
  old_page->SetFlags(0, ~0);
  new_owner->AccountCommitted(old_page->size());
  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
  new_page->InsertAfter(new_owner->anchor()->prev_page());
  return new_page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

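// Live byte accounting: each MemoryChunk tracks the number of bytes occupied
// by objects known to be live. IncrementLiveBytesFromGC is used during
// marking; IncrementLiveBytesFromMutator additionally charges the bytes to
// the owning paged space while the page has not been swept yet.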
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n", this,
                 live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (IsFlagSet(BLACK_PAGE)) return;
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(),
                 "live-bytes: update page=%p delta=%d %d->%d\n", this, by,
                 live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  return added;
}

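// FromAnyPointerAddress() maps an arbitrary address to its MemoryChunk. For
// an address inside a large object the chunk-alignment mask may yield a
// region without a page header (or point into the header area itself); in
// that case the owning LargePage is located through the large object space.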
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPage(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(old_to_old_slots_);
    DCHECK_NULL(typed_old_to_old_slots_);
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

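// MemoryChunkIterator walks all chunks of the heap by chaining the per-space
// iterators. The switch in next() deliberately falls through from one state
// to the next, so an exhausted space immediately continues with the following
// one. Depending on the mode, the map space or the code space is skipped.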
MemoryChunkIterator::MemoryChunkIterator(Heap* heap, Mode mode)
    : state_(kOldSpaceState),
      mode_(mode),
      old_iterator_(heap->old_space()),
      code_iterator_(heap->code_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}

MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_.has_next()) {
        return old_iterator_.next();
      }
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (mode_ != ALL_BUT_MAP_SPACE && map_iterator_.has_next()) {
        return map_iterator_.next();
      }
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (mode_ != ALL_BUT_CODE_SPACE && code_iterator_.has_next()) {
        return code_iterator_.next();
      }
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      MemoryChunk* answer = lo_iterator_.next();
      if (answer != nullptr) {
        return answer;
      }
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
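// AllocateRawUnaligned() tries the bump-pointer fast path first, then the
// free list, and finally the slow path, which may expand the space. For code
// space allocations the skip list (used to find code objects from inner
// addresses) is updated as well.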
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}
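
// A minimal sketch of how callers typically consume an AllocationResult
// (`some_paged_space` and `size` stand in for caller-provided values):
//
//   AllocationResult result =
//       some_paged_space->AllocateRaw(size, kWordAligned);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Allocation failed; result.RetrySpace() names the space to collect
//     // before retrying.
//   }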


// -----------------------------------------------------------------------------
// NewSpace

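// New space allocation bumps the top of the linear allocation area. If the
// request does not fit, EnsureAllocation() is asked to make room (e.g. by
// advancing to the next semispace page); on success the top is re-read since
// it may have changed, and for aligned requests the filler is recomputed.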
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}
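
// A minimal sketch of how a LocalAllocationBuffer is typically obtained
// (`new_space`, `heap`, `kLabSize` and `size` stand in for caller values):
//
//   AllocationResult result =
//       new_space->AllocateRawSynchronized(kLabSize, kWordAligned);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, result, kLabSize);
//   AllocationResult object = lab.AllocateRawAligned(size, kWordAligned);
//
// FromResult() returns InvalidBuffer() when the backing allocation was a
// retry, and TryMerge() lets a buffer reclaim the unused part of another
// buffer that ends exactly at this buffer's top, so leftover space is not
// wasted.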

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_