// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/incremental-marking.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/profiler/heap-profiler.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE>& PageIteratorImpl<PAGE_TYPE>::operator++() {
  p_ = p_->next_page();
  return *this;
}

template <class PAGE_TYPE>
PageIteratorImpl<PAGE_TYPE> PageIteratorImpl<PAGE_TYPE>::operator++(int) {
  PageIteratorImpl<PAGE_TYPE> tmp(*this);
  operator++();
  return tmp;
}

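// Covers the new-space pages spanning [start, limit): from the page containing
// start up to and including the page whose allocation area contains limit.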
NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
    : range_(Page::FromAddress(start),
             Page::FromAllocationAreaAddress(limit)->next_page()) {
  SemiSpace::AssertValidRange(start, limit);
}

// -----------------------------------------------------------------------------
// Bitmap

void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}

void Bitmap::SetAllBits(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++)
    bitmap->cells()[i] = 0xffffffff;
}


// -----------------------------------------------------------------------------
// SemiSpaceIterator

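// Returns the next non-filler object in the semispace, hopping to the next
// page when the cursor reaches a page boundary; returns nullptr once the
// cursor reaches the limit.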
HeapObject* SemiSpaceIterator::Next() {
  while (current_ != limit_) {
    if (Page::IsAlignedToPageSize(current_)) {
      Page* page = Page::FromAllocationAreaAddress(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return nullptr;
    }
    HeapObject* object = HeapObject::FromAddress(current_);
    current_ += object->Size();
    if (!object->IsFiller()) {
      return object;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// HeapObjectIterator

HeapObject* HeapObjectIterator::Next() {
  do {
    HeapObject* next_obj = FromCurrentPage();
    if (next_obj != nullptr) return next_obj;
  } while (AdvanceToNextPage());
  return nullptr;
}

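// Scans the current page for the next non-filler object, skipping over the
// unused gap between the space's top and limit; returns nullptr when the end
// of the page is reached.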
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    const int obj_size = obj->Size();
    cur_addr_ += obj_size;
    DCHECK_LE(cur_addr_, cur_end_);
    if (!obj->IsFiller()) {
      if (obj->IsCode()) {
        DCHECK_EQ(space_, space_->heap()->code_space());
        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
      } else {
        DCHECK_OBJECT_SIZE(obj_size);
      }
      return obj;
    }
  }
  return nullptr;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// SemiSpace

bool SemiSpace::Contains(HeapObject* o) {
  return id_ == kToSpace
             ? MemoryChunk::FromAddress(o->address())->InToSpace()
             : MemoryChunk::FromAddress(o->address())->InFromSpace();
}

bool SemiSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool SemiSpace::ContainsSlow(Address a) {
  for (Page* p : *this) {
    if (p == MemoryChunk::FromAddress(a)) return true;
  }
  return false;
}

// --------------------------------------------------------------------------
// NewSpace

bool NewSpace::Contains(HeapObject* o) {
  return MemoryChunk::FromAddress(o->address())->InNewSpace();
}

bool NewSpace::Contains(Object* o) {
  return o->IsHeapObject() && Contains(HeapObject::cast(o));
}

bool NewSpace::ContainsSlow(Address a) {
  return from_space_.ContainsSlow(a) || to_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContainsSlow(Address a) {
  return to_space_.ContainsSlow(a);
}

bool NewSpace::FromSpaceContainsSlow(Address a) {
  return from_space_.ContainsSlow(a);
}

bool NewSpace::ToSpaceContains(Object* o) { return to_space_.Contains(o); }
bool NewSpace::FromSpaceContains(Object* o) { return from_space_.Contains(o); }

// --------------------------------------------------------------------------
// AllocationResult

AllocationSpace AllocationResult::RetrySpace() {
  DCHECK(IsRetry());
  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
}

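// Sets up a page that belongs to a semispace: tags it as being in to-space or
// from-space, applies the incremental-marking page flags, and allocates the
// page's local tracker.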
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk,
                       Executability executable, SemiSpace* owner) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (owner->id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  heap->incremental_marking()->SetNewSpacePageFlags(page);
  page->AllocateLocalTracker();
  return page;
}

// --------------------------------------------------------------------------
// PagedSpace

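// Sets up a page for a paged space. Depending on the initialization mode, the
// page's area is either handed back to the owner's free list (kFreeMemory) or
// kept accounted for as allocated memory (kDoNotFreeMemory).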
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk,
                       Executability executable, PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kAllocatableMemory);
  DCHECK(chunk->owner() == owner);

  owner->IncreaseCapacity(page->area_size());
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  // Make sure that categories are initialized before freeing the area.
  page->InitializeFreeListCategories();
  // If we do not free the memory, the whole page is effectively accounted for
  // as allocated memory that cannot be used for further allocations.
  if (mode == kFreeMemory) {
    owner->Free(page->area_start(), page->area_size());
  }

  return page;
}

Page* Page::ConvertNewToOld(Page* old_page, PagedSpace* new_owner) {
  DCHECK(old_page->InNewSpace());
  old_page->set_owner(new_owner);
  old_page->SetFlags(0, ~0);
  new_owner->AccountCommitted(old_page->size());
  Page* new_page = Page::Initialize<kDoNotFreeMemory>(
      old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
  new_page->InsertAfter(new_owner->anchor()->prev_page());
  return new_page;
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i].Initialize(static_cast<FreeListCategoryType>(i));
  }
}

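// Live-bytes bookkeeping: each chunk tracks how many bytes are held by live
// objects; the counter is updated from both the GC and the mutator.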
void MemoryChunk::IncrementLiveBytesFromGC(HeapObject* object, int by) {
  MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by);
}

void MemoryChunk::ResetLiveBytes() {
  if (FLAG_trace_live_bytes) {
    PrintIsolate(heap()->isolate(), "live-bytes: reset page=%p %d->0\n",
                 static_cast<void*>(this), live_byte_count_);
  }
  live_byte_count_ = 0;
}

void MemoryChunk::IncrementLiveBytes(int by) {
  if (IsFlagSet(BLACK_PAGE)) return;
  if (FLAG_trace_live_bytes) {
    PrintIsolate(
        heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
        static_cast<void*>(this), by, live_byte_count_, live_byte_count_ + by);
  }
  live_byte_count_ += by;
  DCHECK_GE(live_byte_count_, 0);
  DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
}

void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
    static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
  }
  chunk->IncrementLiveBytes(by);
}

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

bool PagedSpace::Contains(Object* o) {
  if (!o->IsHeapObject()) return false;
  Page* p = Page::FromAddress(HeapObject::cast(o)->address());
  if (!Page::IsValid(p)) return false;
  return p->owner() == this;
}

void PagedSpace::UnlinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  page->ForAllFreeListCategories([this](FreeListCategory* category) {
    DCHECK_EQ(free_list(), category->owner());
    free_list()->RemoveCategory(category);
  });
}

intptr_t PagedSpace::RelinkFreeListCategories(Page* page) {
  DCHECK_EQ(this, page->owner());
  intptr_t added = 0;
  page->ForAllFreeListCategories([&added](FreeListCategory* category) {
    added += category->available();
    category->Relink();
  });
  return added;
}

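// Maps an arbitrary interior address to its MemoryChunk; addresses that do not
// lie past a regular page header are looked up in the large-object space
// instead.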
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
  uintptr_t offset = addr - chunk->address();
  if (offset < MemoryChunk::kHeaderSize || !chunk->HasPageHeader()) {
    chunk = heap->lo_space()->FindPage(addr);
  }
  return chunk;
}

Page* Page::FromAnyPointerAddress(Heap* heap, Address addr) {
  return static_cast<Page*>(MemoryChunk::FromAnyPointerAddress(heap, addr));
}

void Page::MarkNeverAllocateForTesting() {
  DCHECK(this->owner()->identity() != NEW_SPACE);
  DCHECK(!IsFlagSet(NEVER_ALLOCATE_ON_PAGE));
  SetFlag(NEVER_ALLOCATE_ON_PAGE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::MarkEvacuationCandidate() {
  DCHECK(!IsFlagSet(NEVER_EVACUATE));
  DCHECK_NULL(old_to_old_slots_);
  DCHECK_NULL(typed_old_to_old_slots_);
  SetFlag(EVACUATION_CANDIDATE);
  reinterpret_cast<PagedSpace*>(owner())->free_list()->EvictFreeListItems(this);
}

void Page::ClearEvacuationCandidate() {
  if (!IsFlagSet(COMPACTION_WAS_ABORTED)) {
    DCHECK_NULL(old_to_old_slots_);
    DCHECK_NULL(typed_old_to_old_slots_);
  }
  ClearFlag(EVACUATION_CANDIDATE);
  InitializeFreeListCategories();
}

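// Iterates over every chunk of the old, map, code and large-object spaces, in
// that order, using a small state machine to switch between the per-space
// iterators.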
MemoryChunkIterator::MemoryChunkIterator(Heap* heap)
    : heap_(heap),
      state_(kOldSpaceState),
      old_iterator_(heap->old_space()->begin()),
      code_iterator_(heap->code_space()->begin()),
      map_iterator_(heap->map_space()->begin()),
      lo_iterator_(heap->lo_space()->begin()) {}

MemoryChunk* MemoryChunkIterator::next() {
  switch (state_) {
    case kOldSpaceState: {
      if (old_iterator_ != heap_->old_space()->end()) return *(old_iterator_++);
      state_ = kMapState;
      // Fall through.
    }
    case kMapState: {
      if (map_iterator_ != heap_->map_space()->end()) return *(map_iterator_++);
      state_ = kCodeState;
      // Fall through.
    }
    case kCodeState: {
      if (code_iterator_ != heap_->code_space()->end())
        return *(code_iterator_++);
      state_ = kLargeObjectState;
      // Fall through.
    }
    case kLargeObjectState: {
      if (lo_iterator_ != heap_->lo_space()->end()) return *(lo_iterator_++);
      state_ = kFinishedState;
      // Fall through.
    }
    case kFinishedState:
      return nullptr;
    default:
      break;
  }
  UNREACHABLE();
  return nullptr;
}

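// FreeListCategory objects are embedded in their page, so both the owning page
// and the owning free list can be recovered from the category's own address.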
Page* FreeListCategory::page() {
  return Page::FromAddress(reinterpret_cast<Address>(this));
}

FreeList* FreeListCategory::owner() {
  return reinterpret_cast<PagedSpace*>(
             Page::FromAddress(reinterpret_cast<Address>(this))->owner())
      ->free_list();
}

bool FreeListCategory::is_linked() {
  return prev_ != nullptr || next_ != nullptr || owner()->top(type_) == this;
}

// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


AllocationResult LocalAllocationBuffer::AllocateRawAligned(
    int size_in_bytes, AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + size_in_bytes;
  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                    filler_size);
  }

  return AllocationResult(HeapObject::FromAddress(current_top));
}


HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
                                                AllocationAlignment alignment) {
  Address current_top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(current_top, alignment);

  Address new_top = current_top + filler_size + *size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  if (filler_size > 0) {
    *size_in_bytes += filler_size;
    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
                                     filler_size);
  }

  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
AllocationResult PagedSpace::AllocateRawUnaligned(
    int size_in_bytes, UpdateSkipList update_skip_list) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (update_skip_list == UPDATE_SKIP_LIST && identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
    int size_in_bytes) {
  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
  return AllocateRawUnaligned(size_in_bytes);
}


// Raw allocation.
AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
                                                AllocationAlignment alignment) {
  DCHECK(identity() == OLD_SPACE);
  int allocation_size = size_in_bytes;
  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);

  if (object == NULL) {
    // We don't know exactly how much filler we need to align until space is
    // allocated, so assume the worst case.
    int filler_size = Heap::GetMaximumFillToAlign(alignment);
    allocation_size += filler_size;
    object = free_list_.Allocate(allocation_size);
    if (object == NULL) {
      object = SlowAllocateRaw(allocation_size);
    }
    if (object != NULL && filler_size != 0) {
      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
                                       alignment);
      // Filler objects are initialized, so mark only the aligned object memory
      // as uninitialized.
      allocation_size = size_in_bytes;
    }
  }

  if (object != NULL) {
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
    return object;
  }

  return AllocationResult::Retry(identity());
}


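// Top-level allocation entry point for paged spaces: on 32-bit hosts,
// double-aligned requests go through the aligned path; every successful
// allocation is reported via AllocationStep.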
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
                                         AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  AllocationResult result =
      alignment == kDoubleAligned
          ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
          : AllocateRawUnaligned(size_in_bytes);
#else
  AllocationResult result = AllocateRawUnaligned(size_in_bytes);
#endif
  HeapObject* heap_obj = nullptr;
  if (!result.IsRetry() && result.To(&heap_obj)) {
    AllocationStep(heap_obj->address(), size_in_bytes);
  }
  return result;
}


// -----------------------------------------------------------------------------
// NewSpace

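// Bump-pointer allocation in the to-space: if the linear allocation area is
// too small, EnsureAllocation is asked to make room before the request is
// retried.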
AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
                                              AllocationAlignment alignment) {
  Address top = allocation_info_.top();
  int filler_size = Heap::GetFillToAlign(top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, alignment)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
    filler_size = Heap::GetFillToAlign(top, alignment);
    aligned_size_in_bytes = size_in_bytes + filler_size;
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + aligned_size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  if (filler_size > 0) {
    obj = heap()->PrecedeWithFiller(obj, filler_size);
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
  Address top = allocation_info_.top();
  if (allocation_info_.limit() < top + size_in_bytes) {
    // See if we can create room.
    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
      return AllocationResult::Retry();
    }

    top = allocation_info_.top();
  }

  HeapObject* obj = HeapObject::FromAddress(top);
  allocation_info_.set_top(top + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
                                       AllocationAlignment alignment) {
#ifdef V8_HOST_ARCH_32_BIT
  return alignment == kDoubleAligned
             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
             : AllocateRawUnaligned(size_in_bytes);
#else
  return AllocateRawUnaligned(size_in_bytes);
#endif
}


MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
    int size_in_bytes, AllocationAlignment alignment) {
  base::LockGuard<base::Mutex> guard(&mutex_);
  return AllocateRaw(size_in_bytes, alignment);
}

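// Initializes a large-object page; executable chunks larger than
// LargePage::kMaxCodePageSize are rejected with a fatal error.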
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable, Space* owner) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->memory_allocator()->Available());
}


LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
}


LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
                                                        AllocationResult result,
                                                        intptr_t size) {
  if (result.IsRetry()) return InvalidBuffer();
  HeapObject* obj = nullptr;
  bool ok = result.To(&obj);
  USE(ok);
  DCHECK(ok);
  Address top = HeapObject::cast(obj)->address();
  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
}


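// Merges two adjacent buffers: if this buffer's allocation top coincides with
// the other buffer's limit, the other buffer's remaining space is folded into
// this one and the other buffer is reset.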
bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
  if (allocation_info_.top() == other->allocation_info_.limit()) {
    allocation_info_.set_top(other->allocation_info_.top());
    other->allocation_info_.Reset(nullptr, nullptr);
    return true;
  }
  return false;
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_INL_H_