// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "memory.h"
#include "spaces.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
               ? space_->first_page_
               : prev_page_->next_page();
  return prev_page_;
}
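

// A usage sketch for the iterator above (illustrative only; it assumes the
// PageIterator constructor declared in spaces.h, taking the space to walk
// and an iteration mode):
//
//   PageIterator it(space, PageIterator::ALL_PAGES);
//   while (it.has_next()) {
//     Page* p = it.next();  // The first call returns space->first_page_.
//     // ... visit p ...
//   }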


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  return owner->PageAllocationTop(this);
}


Address Page::AllocationWatermark() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  if (this == owner->AllocationTopPage()) {
    return owner->top();
  }
  return address() + AllocationWatermarkOffset();
}


uint32_t Page::AllocationWatermarkOffset() {
  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                               kAllocationWatermarkOffsetShift);
}


void Page::SetAllocationWatermark(Address allocation_watermark) {
  if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
    // While iterating over intergenerational references during a scavenge
    // we might decide to promote a young object we encounter. We allocate
    // space for such an object and put it into the promotion queue to be
    // processed later. If that space was allocated beyond the allocation
    // watermark, garbage pointers could appear below the watermark. To
    // avoid visiting them during the dirty-region iteration, which might
    // still be in progress, we store a valid allocation watermark value
    // and mark this page as having an invalid watermark.
    SetCachedAllocationWatermark(AllocationWatermark());
    InvalidateWatermark(true);
  }

  flags_ = (flags_ & kFlagsMask) |
           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
  ASSERT(AllocationWatermarkOffset()
         == static_cast<uint32_t>(Offset(allocation_watermark)));
}


void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
  mc_first_forwarded = allocation_watermark;
}


Address Page::CachedAllocationWatermark() {
  return mc_first_forwarded;
}


uint32_t Page::GetRegionMarks() {
  return dirty_regions_;
}


void Page::SetRegionMarks(uint32_t marks) {
  dirty_regions_ = marks;
}


int Page::GetRegionNumberForAddress(Address addr) {
  // Each page is divided into 256-byte regions. Each region has a
  // corresponding dirty mark bit in the page header. A region can contain
  // intergenerational references iff its dirty mark is set.
  // A normal 8K page contains exactly 32 regions, so all region marks fit
  // into a 32-bit integer field. To calculate a region number we just
  // divide the offset inside the page by the region size.
  // A large page can contain more than 32 regions. But we want to avoid
  // additional write barrier code for distinguishing between large and
  // normal pages, so we just ignore the fact that addr points into a large
  // page and calculate the region number as if addr pointed into a normal
  // 8K page. This way we get a region number modulo 32, so for large pages
  // several regions might be mapped to a single dirty mark.
  ASSERT_PAGE_ALIGNED(this->address());
  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

  // We are using masking with kPageAlignmentMask instead of Page::Offset()
  // to get the offset from the beginning of the 8K page containing addr,
  // not from the beginning of the actual page, which can be bigger than 8K.
  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}
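

// A worked example of the mapping above (an illustration, assuming
// kRegionSizeLog2 == 8, i.e. 256-byte regions, and 8K pages): an addr at
// offset 0x6A3 inside its 8K-aligned page falls in region 0x6A3 >> 8 == 6.
// On a large page, an addr at offset 0x26A3 from the page start yields
// offset_inside_normal_page == 0x26A3 & 0x1FFF == 0x6A3, i.e. region 6
// again, so several large-page regions share one dirty mark.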


uint32_t Page::GetRegionMaskForAddress(Address addr) {
  return 1 << GetRegionNumberForAddress(addr);
}


uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
  uint32_t result = 0;
  if (length_in_bytes >= kPageSize) {
    result = kAllRegionsDirtyMarks;
  } else if (length_in_bytes > 0) {
    int start_region = GetRegionNumberForAddress(start);
    int end_region =
        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
    uint32_t start_mask = (~0) << start_region;
    uint32_t end_mask = ~((~1) << end_region);
    result = start_mask & end_mask;
    // If end_region < start_region (the span wrapped modulo 32 on a large
    // page), the intersection is empty and the masks are OR'ed instead.
    if (result == 0) result = start_mask | end_mask;
  }
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    uint32_t expected = 0;
    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
      expected |= GetRegionMaskForAddress(a);
    }
    ASSERT(expected == result);
  }
#endif
  return result;
}
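

// A worked example of the mask arithmetic above (illustrative): with
// start_region == 2 and end_region == 5, start_mask == 0xFFFFFFFC (bits
// 2..31) and end_mask == 0x0000003F (bits 0..5), so result == 0x3C (bits
// 2..5). For a wrapped span on a large page, e.g. start_region == 30 and
// end_region == 1, the AND is empty and start_mask | end_mask dirties
// regions 30, 31, 0 and 1 instead.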


void Page::MarkRegionDirty(Address address) {
  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}


bool Page::IsRegionDirty(Address address) {
  return GetRegionMarks() & GetRegionMaskForAddress(address);
}


void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
  int rstart = GetRegionNumberForAddress(start);
  int rend = GetRegionNumberForAddress(end);

  if (reaches_limit) {
    end += 1;
  }

  if ((rend - rstart) == 0) {
    return;
  }

  uint32_t bitmask = 0;

  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
      || (start == ObjectAreaStart())) {
    // The first region is fully covered.
    bitmask = 1 << rstart;
  }

  while (++rstart < rend) {
    bitmask |= 1 << rstart;
  }

  if (bitmask) {
    SetRegionMarks(GetRegionMarks() & ~bitmask);
  }
}


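// A note on the scheme implemented by the three functions below (as can be
// read off the code itself): a page's watermark counts as valid iff the
// page's WATERMARK_INVALIDATED flag bit differs from the process-wide
// watermark_invalidated_mark_. Flipping the global mark therefore toggles
// the validity of every page's watermark at once, in O(1), instead of
// requiring a pass over all pages; InvalidateWatermark() then sets an
// individual page's bit relative to the current global meaning.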
void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
  watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}


bool Page::IsWatermarkValid() {
  return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
}


void Page::InvalidateWatermark(bool value) {
  if (value) {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             watermark_invalidated_mark_;
  } else {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
  }

  ASSERT(IsWatermarkValid() == !value);
}


bool Page::GetPageFlag(PageFlag flag) {
  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags_ |= static_cast<intptr_t>(1 << flag);
  } else {
    flags_ &= ~static_cast<intptr_t>(1 << flag);
  }
}


void Page::ClearPageFlags() {
  flags_ = 0;
}


void Page::ClearGCFields() {
  InvalidateWatermark(true);
  SetAllocationWatermark(ObjectAreaStart());
  if (Heap::gc_state() == Heap::SCAVENGE) {
    SetCachedAllocationWatermark(ObjectAreaStart());
  }
  SetRegionMarks(kAllRegionsCleanMarks);
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}


bool Page::IsPageExecutable() {
  return GetPageFlag(IS_EXECUTABLE);
}


void Page::SetIsPageExecutable(bool is_page_executable) {
  SetPageFlag(IS_EXECUTABLE, is_page_executable);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
  address_ = a;
  size_ = s;
  owner_ = o;
  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
}


bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
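

// An illustration of the opaque_header encoding used by the three functions
// above (a hypothetical example, assuming an 8K page size): for a next page
// at address 0x2A4000 in chunk 5, SetNextPage() stores
// 0x2A4000 | 5 == 0x2A4005. Since page addresses are page-aligned, the low
// kPageAlignmentMask bits are free to hold the chunk id: GetChunkId()
// recovers 5 via & Page::kPageAlignmentMask, and GetNextPage() recovers
// 0x2A4000 via & ~Page::kPageAlignmentMask.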


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  ASSERT(p->is_valid());

  return MemoryAllocator::IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow-case logic (e.g., moving to the next page or trying
// free-list allocation), so it can be used by all the allocation functions
// and for all the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}
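

// A worked example of the bump-pointer fast path above (illustrative): with
// alloc_info->top == 0x1000 and alloc_info->limit == 0x1100, a request for
// 0x20 bytes returns the object at 0x1000 and bumps top to 0x1020, while a
// request for 0x200 bytes would exceed the limit and return NULL, deferring
// to the slow path in AllocateRaw()/MCAllocateRaw() below.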


// Raw allocation.
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// Reallocating (and promoting) objects during a compacting collection.
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}


// -----------------------------------------------------------------------------
// NewSpace

Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


// -----------------------------------------------------------------------------
// FreeListNode

bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == Heap::raw_unchecked_byte_array_map()
      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_