// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_INL_H_
#define V8_HEAP_SPACES_INL_H_

#include "src/heap/spaces.h"
#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/msan.h"
#include "src/v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// Bitmap

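// Clears every mark bit on the chunk and resets its live-byte count.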
void Bitmap::Clear(MemoryChunk* chunk) {
  Bitmap* bitmap = chunk->markbits();
  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
  chunk->ResetLiveBytes();
}


// -----------------------------------------------------------------------------
// PageIterator


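// Walks the pages of a paged space in list order, starting at the page after
// the space's anchor.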
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) {}


bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }


Page* PageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// NewSpacePageIterator


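// The NewSpace overload iterates the pages of the to-space, the SemiSpace
// overload the pages of the given semispace, and the (start, limit) overload
// the pages spanning that address range.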
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}

NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) {}

NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}


bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }


NewSpacePage* NewSpacePageIterator::next() {
  DCHECK(has_next());
  prev_page_ = next_page_;
  next_page_ = next_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// HeapObjectIterator
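
// Returns the next live object on the current page, or NULL at the end of
// the page. Skips fillers as well as the unused gap between the allocation
// top and limit.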
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    DCHECK(cur_addr_ <= cur_end_);
    if (!obj->IsFiller()) {
      DCHECK_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}


// -----------------------------------------------------------------------------
// MemoryAllocator

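// Thin wrappers around the OS page-protection primitives; they are only
// compiled in when heap protection is enabled.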
#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  base::OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start, size_t size,
                                Executability executable) {
  base::OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                      chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace
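
// Sets up a freshly committed chunk as a page of |owner|: the page's whole
// area is added to the owner's capacity and free list, and the incremental
// marking flags for old-space pages are applied.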
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
  DCHECK(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}


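// Returns true if |addr| lies on a page owned by this space.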
bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return p->owner() == this;
}


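// Sets or clears SCAN_ON_SCAVENGE and keeps the heap's count of
// scan-on-scavenge pages in sync with the flag.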
void MemoryChunk::set_scan_on_scavenge(bool scan) {
  if (scan) {
    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
    SetFlag(SCAN_ON_SCAVENGE);
  } else {
    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
    ClearFlag(SCAN_ON_SCAVENGE);
  }
  heap_->incremental_marking()->SetOldSpacePageFlags(this);
}


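// Returns the chunk containing |addr|. For regular pages, masking off the
// page alignment bits is enough; an interior pointer into a large object may
// mask to an unowned address, in which case the large object space is
// searched instead.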
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(heap->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;
}


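// Raises the chunk's high water mark to |mark| if it is above the current
// value.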
void MemoryChunk::UpdateHighWaterMark(Address mark) {
  if (mark == NULL) return;
  // Need to subtract one from the mark because when a chunk is full the
  // top points to the next address after the chunk, which effectively belongs
  // to another chunk. See the comment to Page::FromAllocationTop.
  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
  int new_mark = static_cast<int>(mark - chunk->address());
  if (new_mark > chunk->high_water_mark_) {
    chunk->high_water_mark_ = new_mark;
  }
}


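// Iterates over the chunks of the spaces that can contain pointers: the old
// pointer space, the map space, and the large object space.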
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) {}


Page* Page::next_page() {
  DCHECK(next_chunk()->owner() == owner());
  return static_cast<Page*>(next_chunk());
}


Page* Page::prev_page() {
  DCHECK(prev_chunk()->owner() == owner());
  return static_cast<Page*>(prev_chunk());
}


void Page::set_next_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_next_chunk(page);
}


void Page::set_prev_page(Page* page) {
  DCHECK(page->owner() == owner());
  set_prev_chunk(page);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
  Address current_top = allocation_info_.top();
  Address new_top = current_top + size_in_bytes;
  if (new_top > allocation_info_.limit()) return NULL;

  allocation_info_.set_top(new_top);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation: try the linear allocation area first, then the free list,
// and finally the slow path, which may sweep or expand the space; on failure
// the caller is asked to retry (typically after a GC).
AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
  HeapObject* object = AllocateLinearly(size_in_bytes);

  if (object == NULL) {
    object = free_list_.Allocate(size_in_bytes);
    if (object == NULL) {
      object = SlowAllocateRaw(size_in_bytes);
    }
  }

  if (object != NULL) {
    if (identity() == CODE_SPACE) {
      SkipList::Update(object->address(), size_in_bytes);
    }
    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
    return object;
  }

  return AllocationResult::Retry(identity());
}


// -----------------------------------------------------------------------------
// NewSpace


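// Bump-pointer allocation in the to-space linear allocation area; falls back
// to SlowAllocateRaw when fewer than |size_in_bytes| bytes remain.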
AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
  Address old_top = allocation_info_.top();

  if (allocation_info_.limit() - old_top < size_in_bytes) {
    return SlowAllocateRaw(size_in_bytes);
  }

  HeapObject* obj = HeapObject::FromAddress(old_top);
  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // The slow path above ultimately goes through AllocateRaw, so this suffices.
  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);

  return obj;
}


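// Tags a chunk as a large page and applies the incremental marking flags for
// old-space pages.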
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}


intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}


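// A heap object is a free-list node exactly when its map is the free-space
// map or one of the filler maps.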
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  Map* map = object->map();
  Heap* heap = object->GetHeap();
  return map == heap->raw_unchecked_free_space_map() ||
         map == heap->raw_unchecked_one_pointer_filler_map() ||
         map == heap->raw_unchecked_two_pointer_filler_map();
}
}
}  // namespace v8::internal

#endif  // V8_HEAP_SPACES_INL_H_