Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 1 | // Copyright 2011 the V8 project authors. All rights reserved. |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 2 | // Redistribution and use in source and binary forms, with or without |
| 3 | // modification, are permitted provided that the following conditions are |
| 4 | // met: |
| 5 | // |
| 6 | // * Redistributions of source code must retain the above copyright |
| 7 | // notice, this list of conditions and the following disclaimer. |
| 8 | // * Redistributions in binary form must reproduce the above |
| 9 | // copyright notice, this list of conditions and the following |
| 10 | // disclaimer in the documentation and/or other materials provided |
| 11 | // with the distribution. |
| 12 | // * Neither the name of Google Inc. nor the names of its |
| 13 | // contributors may be used to endorse or promote products derived |
| 14 | // from this software without specific prior written permission. |
| 15 | // |
| 16 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | |
| 28 | #ifndef V8_SPACES_INL_H_ |
| 29 | #define V8_SPACES_INL_H_ |
| 30 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 31 | #include "isolate.h" |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 32 | #include "spaces.h" |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 33 | #include "v8memory.h" |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 34 | |
| 35 | namespace v8 { |
| 36 | namespace internal { |
| 37 | |
| 38 | |
| 39 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 40 | // Bitmap |
| 41 | |
| 42 | void Bitmap::Clear(MemoryChunk* chunk) { |
| 43 | Bitmap* bitmap = chunk->markbits(); |
| 44 | for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0; |
| 45 | chunk->ResetLiveBytes(); |
| 46 | } |
| 47 | |
| 48 | |
| 49 | // ----------------------------------------------------------------------------- |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 50 | // PageIterator |
| 51 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 52 | |
// Starts iteration at the first page of |space|.  The space's anchor page is
// a sentinel in the circular page list: prev_page_ begins at the anchor and
// next_page_ at the first real page after it.
PageIterator::PageIterator(PagedSpace* space)
    : space_(space),
      prev_page_(&space->anchor_),
      next_page_(prev_page_->next_page()) { }
| 57 | |
| 58 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 59 | bool PageIterator::has_next() { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 60 | return next_page_ != &space_->anchor_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 61 | } |
| 62 | |
| 63 | |
| 64 | Page* PageIterator::next() { |
| 65 | ASSERT(has_next()); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 66 | prev_page_ = next_page_; |
| 67 | next_page_ = next_page_->next_page(); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 68 | return prev_page_; |
| 69 | } |
| 70 | |
| 71 | |
| 72 | // ----------------------------------------------------------------------------- |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 73 | // NewSpacePageIterator |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 74 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 75 | |
// Iterates over the pages of the to-space of |space|.  prev_page_ is seeded
// one page before the first to-space page so the first next() call returns
// the page containing ToSpaceStart().
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
| 80 | |
// Iterates over all pages of semispace |space|.  The anchor page is the
// sentinel of the circular list, so its prev_page() is the last real page.
NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
    : prev_page_(space->anchor()),
      next_page_(prev_page_->next_page()),
      last_page_(prev_page_->prev_page()) { }
| 85 | |
// Iterates over the pages covering the address range [start, limit).
// The range is checked (in debug builds) to lie within a single semispace.
NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
      next_page_(NewSpacePage::FromAddress(start)),
      last_page_(NewSpacePage::FromLimit(limit)) {
  SemiSpace::AssertValidRange(start, limit);
}
| 92 | |
| 93 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 94 | bool NewSpacePageIterator::has_next() { |
| 95 | return prev_page_ != last_page_; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 96 | } |
| 97 | |
| 98 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 99 | NewSpacePage* NewSpacePageIterator::next() { |
| 100 | ASSERT(has_next()); |
| 101 | prev_page_ = next_page_; |
| 102 | next_page_ = next_page_->next_page(); |
| 103 | return prev_page_; |
Ben Murdoch | 85b7179 | 2012-04-11 18:30:58 +0100 | [diff] [blame] | 104 | } |
| 105 | |
| 106 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 107 | // ----------------------------------------------------------------------------- |
| 108 | // HeapObjectIterator |
// Returns the next non-filler object on the current page, advancing
// cur_addr_ past it, or NULL when cur_end_ is reached.  The unused part of
// the linear allocation area (space top..limit) is skipped so the iterator
// never touches uninitialized memory.
HeapObject* HeapObjectIterator::FromCurrentPage() {
  while (cur_addr_ != cur_end_) {
    // Hop over the hole between the allocation top and limit.
    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
      cur_addr_ = space_->limit();
      continue;
    }
    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    // Use the caller-supplied size function when one was given.
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_end_);
    // Filler objects (free-space padding) are skipped, not returned.
    if (!obj->IsFiller()) {
      ASSERT_OBJECT_SIZE(obj_size);
      return obj;
    }
  }
  return NULL;
}
| 126 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 127 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 128 | // ----------------------------------------------------------------------------- |
| 129 | // MemoryAllocator |
| 130 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 131 | #ifdef ENABLE_HEAP_PROTECTION |
| 132 | |
// Revokes access to the address range [start, start + size) via OS-level
// page protection (heap-protection builds only).
void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}
| 136 | |
| 137 | |
// Restores access to [start, start + size); |executable| selects whether the
// pages are re-enabled for execution as well.
void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}
| 143 | |
| 144 | |
// Protects the entire chunk that |page| belongs to (looked up by chunk id).
void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}
| 149 | |
| 150 | |
// Unprotects the entire chunk that |page| belongs to, restoring execute
// permission when the chunk's owning space is executable.
void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}
| 156 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 157 | #endif |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 158 | |
| 159 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 160 | // -------------------------------------------------------------------------- |
| 161 | // PagedSpace |
// Turns a freshly allocated MemoryChunk into a Page owned by |owner|: the
// page's usable area is counted into the owner's capacity and immediately
// placed on the owner's free list.  NOTE(review): |executable| is not read
// in this body — presumably executability was already applied when the
// chunk was allocated; confirm at the call site.
Page* Page::Initialize(Heap* heap,
                       MemoryChunk* chunk,
                       Executability executable,
                       PagedSpace* owner) {
  Page* page = reinterpret_cast<Page*>(chunk);
  ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
  ASSERT(chunk->owner() == owner);
  owner->IncreaseCapacity(page->area_size());
  owner->Free(page->area_start(), page->area_size());

  // Let the incremental marker set the page flags it requires.
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);

  return page;
}
| 176 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 177 | |
| 178 | bool PagedSpace::Contains(Address addr) { |
| 179 | Page* p = Page::FromAddress(addr); |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 180 | if (!p->is_valid()) return false; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 181 | return p->owner() == this; |
| 182 | } |
| 183 | |
| 184 | |
| 185 | void MemoryChunk::set_scan_on_scavenge(bool scan) { |
| 186 | if (scan) { |
| 187 | if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages(); |
| 188 | SetFlag(SCAN_ON_SCAVENGE); |
| 189 | } else { |
| 190 | if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages(); |
| 191 | ClearFlag(SCAN_ON_SCAVENGE); |
| 192 | } |
| 193 | heap_->incremental_marking()->SetOldSpacePageFlags(this); |
| 194 | } |
| 195 | |
| 196 | |
// Maps an arbitrary interior pointer to the MemoryChunk containing it.  For
// regular pages the chunk header lives at the page-aligned base address.
// Large-object pages span more than one page alignment unit, so the aligned
// candidate may not be a chunk header (its owner() is NULL); in that case
// scan the large object space for the object whose chunk contains |addr|.
MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
  if (maybe->owner() != NULL) return maybe;
  LargeObjectIterator iterator(HEAP->lo_space());
  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
    // Fixed arrays are the only pointer-containing objects in large object
    // space.
    if (o->IsFixedArray()) {
      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
      if (chunk->Contains(addr)) {
        return chunk;
      }
    }
  }
  UNREACHABLE();
  return NULL;  // Unreachable; silences missing-return warnings.
}
| 215 | |
| 216 | |
// Sets up iteration over the chunks of the spaces visited by this iterator:
// old pointer space, map space and large object space.  Starts in
// kOldPointerState; presumably next() advances through the three embedded
// iterators in that order — confirm against the next() implementation,
// which is not visible in this file.
PointerChunkIterator::PointerChunkIterator(Heap* heap)
    : state_(kOldPointerState),
      old_pointer_iterator_(heap->old_pointer_space()),
      map_iterator_(heap->map_space()),
      lo_iterator_(heap->lo_space()) { }
| 222 | |
| 223 | |
| 224 | Page* Page::next_page() { |
| 225 | ASSERT(next_chunk()->owner() == owner()); |
| 226 | return static_cast<Page*>(next_chunk()); |
| 227 | } |
| 228 | |
| 229 | |
| 230 | Page* Page::prev_page() { |
| 231 | ASSERT(prev_chunk()->owner() == owner()); |
| 232 | return static_cast<Page*>(prev_chunk()); |
| 233 | } |
| 234 | |
| 235 | |
| 236 | void Page::set_next_page(Page* page) { |
| 237 | ASSERT(page->owner() == owner()); |
| 238 | set_next_chunk(page); |
| 239 | } |
| 240 | |
| 241 | |
| 242 | void Page::set_prev_page(Page* page) { |
| 243 | ASSERT(page->owner() == owner()); |
| 244 | set_prev_chunk(page); |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 245 | } |
| 246 | |
| 247 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 248 | // Try linear allocation in the page of alloc_info's allocation top. Does |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 249 | // not contain slow case logic (e.g. move to the next page or try free list |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 250 | // allocation) so it can be used by all the allocation functions and for all |
| 251 | // the paged spaces. |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 252 | HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) { |
| 253 | Address current_top = allocation_info_.top; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 254 | Address new_top = current_top + size_in_bytes; |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 255 | if (new_top > allocation_info_.limit) return NULL; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 256 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 257 | allocation_info_.top = new_top; |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 258 | return HeapObject::FromAddress(current_top); |
| 259 | } |
| 260 | |
| 261 | |
| 262 | // Raw allocation. |
John Reck | 5913587 | 2010-11-02 12:39:01 -0700 | [diff] [blame] | 263 | MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 264 | HeapObject* object = AllocateLinearly(size_in_bytes); |
| 265 | if (object != NULL) { |
| 266 | if (identity() == CODE_SPACE) { |
| 267 | SkipList::Update(object->address(), size_in_bytes); |
| 268 | } |
| 269 | return object; |
| 270 | } |
| 271 | |
| 272 | object = free_list_.Allocate(size_in_bytes); |
| 273 | if (object != NULL) { |
| 274 | if (identity() == CODE_SPACE) { |
| 275 | SkipList::Update(object->address(), size_in_bytes); |
| 276 | } |
| 277 | return object; |
| 278 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 279 | |
| 280 | object = SlowAllocateRaw(size_in_bytes); |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 281 | if (object != NULL) { |
| 282 | if (identity() == CODE_SPACE) { |
| 283 | SkipList::Update(object->address(), size_in_bytes); |
| 284 | } |
| 285 | return object; |
| 286 | } |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 287 | |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 288 | return Failure::RetryAfterGC(identity()); |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 289 | } |
| 290 | |
| 291 | |
| 292 | // ----------------------------------------------------------------------------- |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 293 | // NewSpace |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 294 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 295 | |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 296 | MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { |
| 297 | Address old_top = allocation_info_.top; |
| 298 | if (allocation_info_.limit - old_top < size_in_bytes) { |
| 299 | return SlowAllocateRaw(size_in_bytes); |
| 300 | } |
| 301 | |
| 302 | Object* obj = HeapObject::FromAddress(allocation_info_.top); |
| 303 | allocation_info_.top += size_in_bytes; |
| 304 | ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 305 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 306 | return obj; |
| 307 | } |
| 308 | |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 309 | |
// Finishes initialization of a chunk used as a large-object page; only the
// incremental-marking page flags need to be set up here.
LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
  return static_cast<LargePage*>(chunk);
}
| 314 | |
| 315 | |
// Upper bound on the object bytes still allocatable in this space, derived
// from the memory allocator's remaining capacity.
intptr_t LargeObjectSpace::Available() {
  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
}
| 319 | |
| 320 | |
// Truncates a sequential string to |length| characters by moving the
// allocation top back to the string's new end.  Only valid when the string
// is the most recently allocated object, i.e. it ends exactly at the
// current allocation top (asserted below).
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  // The string must abut the allocation top for the shrink to be safe.
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  Address old_top = allocation_info_.top;
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
  // If the string is already marked black, its full size was counted as
  // live; subtract the freed tail from the page's live-byte count.
  if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
    int delta = static_cast<int>(old_top - allocation_info_.top);
    MemoryChunk::IncrementLiveBytesFromMutator(string->address(), -delta);
  }
}
| 336 | |
| 337 | |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 338 | bool FreeListNode::IsFreeListNode(HeapObject* object) { |
Ben Murdoch | 3ef787d | 2012-04-12 10:51:47 +0100 | [diff] [blame] | 339 | Map* map = object->map(); |
| 340 | Heap* heap = object->GetHeap(); |
| 341 | return map == heap->raw_unchecked_free_space_map() |
| 342 | || map == heap->raw_unchecked_one_pointer_filler_map() |
| 343 | || map == heap->raw_unchecked_two_pointer_filler_map(); |
Steve Block | 3ce2e20 | 2009-11-05 08:53:23 +0000 | [diff] [blame] | 344 | } |
| 345 | |
Steve Block | a7e24c1 | 2009-10-30 11:49:00 +0000 | [diff] [blame] | 346 | } } // namespace v8::internal |
| 347 | |
| 348 | #endif // V8_SPACES_INL_H_ |