// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
      ? space_->first_page_
      : prev_page_->next_page();
  return prev_page_;
}

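// Illustrative usage only (a sketch; the iterator's constructor and its
// mode enum live in spaces.h, not in this file, and PAGES_IN_USE is an
// assumed mode name):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... visit p ...
//   }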

// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return heap_->isolate()->memory_allocator()->GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  return owner->PageAllocationTop(this);
}


Address Page::AllocationWatermark() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  if (this == owner->AllocationTopPage()) {
    return owner->top();
  }
  return address() + AllocationWatermarkOffset();
}


uint32_t Page::AllocationWatermarkOffset() {
  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                               kAllocationWatermarkOffsetShift);
}


void Page::SetAllocationWatermark(Address allocation_watermark) {
  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
    // While iterating over intergenerational references during a scavenge
    // we might decide to promote an encountered young object.  We allocate
    // space for such an object and put it into the promotion queue to
    // process later.  If that space was allocated beyond the allocation
    // watermark, garbage pointers might appear under the watermark.  To
    // avoid visiting them during the dirty-regions iteration, which might
    // still be in progress, we store a valid allocation watermark value
    // and mark this page as having an invalid watermark.
    SetCachedAllocationWatermark(AllocationWatermark());
    InvalidateWatermark(true);
  }

  flags_ = (flags_ & kFlagsMask) |
           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
  ASSERT(AllocationWatermarkOffset()
         == static_cast<uint32_t>(Offset(allocation_watermark)));
}


void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
  mc_first_forwarded = allocation_watermark;
}


Address Page::CachedAllocationWatermark() {
  return mc_first_forwarded;
}


uint32_t Page::GetRegionMarks() {
  return dirty_regions_;
}


void Page::SetRegionMarks(uint32_t marks) {
  dirty_regions_ = marks;
}


int Page::GetRegionNumberForAddress(Address addr) {
  // Each page is divided into 256-byte regions, and each region has a
  // corresponding dirty-mark bit in the page header.  A region can contain
  // intergenerational references iff its dirty mark is set.
  // A normal 8K page contains exactly 32 regions, so all region marks fit
  // into a 32-bit integer field.  To calculate a region number we simply
  // divide the offset inside the page by the region size.
  // A large page can contain more than 32 regions.  But we want to avoid
  // additional write-barrier code for distinguishing between large and
  // normal pages, so we ignore the fact that addr may point into a large
  // page and calculate the region number as if addr pointed into a normal
  // 8K page.  This way we get a region number modulo 32, so for large
  // pages several regions might be mapped to a single dirty mark.
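  // For example, an address at offset 0x0A40 inside its 8K page lies in
  // region 0x0A40 / 256 = 10, so bit 10 of the dirty-region marks covers it.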
  ASSERT_PAGE_ALIGNED(this->address());
  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

  // We mask with kPageAlignmentMask instead of using Page::Offset() to get
  // the offset from the beginning of the 8K page containing addr, not from
  // the beginning of the actual page, which can be bigger than 8K.
  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}


uint32_t Page::GetRegionMaskForAddress(Address addr) {
  return 1 << GetRegionNumberForAddress(addr);
}


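// A worked example for GetRegionMaskForSpan below: a span covering regions
// 3 through 5 yields start_mask = 0xFFFFFFF8 (bits 3..31) and
// end_mask = 0x0000003F (bits 0..5), so the result is their intersection,
// 0x00000038 (bits 3..5).  For a span whose region numbers wrap around
// modulo 32, e.g. start_region = 30 and end_region = 1, the intersection is
// empty and the masks are ORed instead, giving 0xC0000003 (bits 30, 31, 0
// and 1).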
uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
  uint32_t result = 0;
  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
    result = kAllRegionsDirtyMarks;
  } else if (length_in_bytes > 0) {
    int start_region = GetRegionNumberForAddress(start);
    int end_region =
        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
    uint32_t start_mask = (~0) << start_region;
    uint32_t end_mask = ~((~1) << end_region);
    result = start_mask & end_mask;
    // If end_region < start_region the masks do not overlap and the result
    // above is zero, so the masks are ORed instead.
    if (result == 0) result = start_mask | end_mask;
  }
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    uint32_t expected = 0;
    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
      expected |= GetRegionMaskForAddress(a);
    }
    ASSERT(expected == result);
  }
#endif
  return result;
}


void Page::MarkRegionDirty(Address address) {
  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}


bool Page::IsRegionDirty(Address address) {
  return GetRegionMarks() & GetRegionMaskForAddress(address);
}


void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
  int rstart = GetRegionNumberForAddress(start);
  int rend = GetRegionNumberForAddress(end);

  if (reaches_limit) {
    end += 1;
  }

  if ((rend - rstart) == 0) {
    return;
  }

  uint32_t bitmask = 0;

  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
      || (start == ObjectAreaStart())) {
    // First region is fully covered.
    bitmask = 1 << rstart;
  }

  while (++rstart < rend) {
    bitmask |= 1 << rstart;
  }

  if (bitmask) {
    SetRegionMarks(GetRegionMarks() & ~bitmask);
  }
}


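// The WATERMARK_INVALIDATED page flag is interpreted relative to the
// heap-wide page_watermark_invalidated_mark_: a page's watermark is valid
// only while its flag bit differs from that mark (see IsWatermarkValid
// below).  Flipping the heap-wide mark therefore inverts the validity of
// every page's watermark at once without touching the pages themselves.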
void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}


bool Page::IsWatermarkValid() {
  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
      heap_->page_watermark_invalidated_mark_;
}


void Page::InvalidateWatermark(bool value) {
  if (value) {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             heap_->page_watermark_invalidated_mark_;
  } else {
    flags_ =
        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
        (heap_->page_watermark_invalidated_mark_ ^
         (1 << WATERMARK_INVALIDATED));
  }

  ASSERT(IsWatermarkValid() == !value);
}


bool Page::GetPageFlag(PageFlag flag) {
  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags_ |= static_cast<intptr_t>(1 << flag);
  } else {
    flags_ &= ~static_cast<intptr_t>(1 << flag);
  }
}


void Page::ClearPageFlags() {
  flags_ = 0;
}


void Page::ClearGCFields() {
  InvalidateWatermark(true);
  SetAllocationWatermark(ObjectAreaStart());
  if (heap_->gc_state() == Heap::SCAVENGE) {
    SetCachedAllocationWatermark(ObjectAreaStart());
  }
  SetRegionMarks(kAllRegionsCleanMarks);
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}

Executability Page::PageExecutability() {
  return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
}


void Page::SetPageExecutability(Executability executable) {
  SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
  address_ = a;
  size_ = s;
  owner_ = o;
  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
}


bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


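// The three accessors below rely on one encoding: a page's opaque_header
// packs the page-aligned address of the next page (the high bits) together
// with the id of the owning chunk (the low Page::kPageAlignmentMask bits).
// SetNextPage can store both in a single word because next->address() is
// page aligned, which leaves the low bits free for the chunk id.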
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


// --------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow-case logic (e.g., moving to the next page or trying
// free-list allocation), so it can be used by all the allocation functions
// and for all the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}

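// Illustrative call pattern only (a sketch: the caller shown here and the
// MaybeObject::ToObject helper are assumed from the surrounding heap code,
// not defined in this file):
//
//   Object* result;
//   { MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
//     if (!maybe->ToObject(&result)) return maybe;  // propagate retry-after-GC
//   }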

// Reallocating (and promoting) objects during a compacting collection.
MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// NewSpace

MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
                                           AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


intptr_t LargeObjectSpace::Available() {
  return LargeObjectChunk::ObjectSizeFor(
      heap()->isolate()->memory_allocator()->Available());
}


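// Shrinks a sequential string in place when it is the most recently
// allocated object in new space: its end coincides with the current
// allocation top (asserted below), so trimming it only requires moving the
// top pointer back and updating the stored length.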
template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == HEAP->raw_unchecked_byte_array_map()
      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_