// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "memory.h"
#include "spaces.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// PageIterator

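// A PageIterator is constructed with a paged space and an iteration mode
// (see spaces.h).  prev_page_ is the page most recently returned by next(),
// or NULL before the first call; stop_page_ is the last page the iterator
// will return, so iteration ends once stop_page_ has been handed out.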
bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
               ? space_->first_page_
               : prev_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  return owner->PageAllocationTop(this);
}


void Page::ClearRSet() {
  // This method can be called in all rset states.
  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}

// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed
// as:
//    page_address + words * 4
// For a 64-bit address, the bits are:
// | page address | words (5) | bit offset (5) | pointer alignment (3) |
// and the address of the rset word is computed as:
//    page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
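//
// Worked example, on 32-bit (where the formula above implies kRSetOffset is
// zero): a word at page offset 0x123C has bit_offset = 0x123C >> 2 = 1167,
// so its bit lives in rset word 1167 / 32 = 36 under mask 1 << (1167 % 32),
// i.e. 1 << 15, at address page_address + 36 * kIntSize.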

Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kPointerSizeLog2);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  Address rset_address =
      page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
  // The remembered set address is either in the normal remembered set range
  // of a page or else we have a large object page.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // We have a large object page, and the remembered set address is actually
    // past the end of the object.

    // The first part of the remembered set is still located at the start of
    // the page, but anything after kRSetEndOffset must be relocated to after
    // the large object, i.e. after
    //   (page->ObjectAreaStart() + object size)
    // We do that by adding the difference between the normal RSet's end and
    // the object's end.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    int fixedarray_length =
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
    rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
  }
  return rset_address;
}


void Page::SetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) |= bitmask;

  ASSERT(IsRSetSet(address, offset));
}


// Clears the corresponding remembered set bit for a given address.
void Page::UnsetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) &= ~bitmask;

  ASSERT(!IsRSetSet(address, offset));
}


bool Page::IsRSetSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  return (Memory::uint32_at(rset_address) & bitmask) != 0;
}

// -----------------------------------------------------------------------------
// MemoryAllocator

bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}

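// A page's opaque_header packs two values into one word: the low bits (those
// covered by Page::kPageAlignmentMask) hold the id of the chunk the page
// belongs to, and the remaining high bits hold the page-aligned address of
// the next page in the space's page list.  The accessors below mask out one
// half or the other.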
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return p->opaque_header & Page::kPageAlignmentMask;
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif

// -----------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  ASSERT(p->is_valid());

  return MemoryAllocator::IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g., move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation: try the linear fast path first; on failure fall back to
// the slow path, and if that also fails, return a failure that asks the
// caller to retry after a GC.
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// Reallocating (and promoting) objects during a compacting collection.
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}


// -----------------------------------------------------------------------------
// LargeObjectSpace

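// A large object page keeps the normal remembered set at the start of the
// page for its first Page::kObjectAreaSize bytes; bits for pointers beyond
// that are stored after the object itself (see ComputeRSetBitPosition above).
// This computes how many extra bytes of rset that tail requires, rounded up
// to whole 32-bit rset words.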
int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}


// -----------------------------------------------------------------------------
// NewSpace

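// Bump-pointer allocation in new space.  The AllocationInfo parameter lets
// the same code allocate in either semispace: it either bumps top and
// returns the object at the old top, or fails with a retry-after-GC failure
// when the semispace is full.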
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == Heap::raw_unchecked_byte_array_map()
      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_