// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
               ? space_->first_page_
               : prev_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return heap_->isolate()->memory_allocator()->GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  return owner->PageAllocationTop(this);
}


Address Page::AllocationWatermark() {
  PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
  if (this == owner->AllocationTopPage()) {
    return owner->top();
  }
  return address() + AllocationWatermarkOffset();
}


uint32_t Page::AllocationWatermarkOffset() {
  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                               kAllocationWatermarkOffsetShift);
}
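
// Note on the flags_ layout, as implied by the accessors above and by
// SetAllocationWatermark below: the low bits of flags_ (selected by
// kFlagsMask) hold the per-page flag bits, while the bits selected by
// kAllocationWatermarkOffsetMask (at and above
// kAllocationWatermarkOffsetShift) cache the allocation watermark offset,
// so a single word stores both.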


void Page::SetAllocationWatermark(Address allocation_watermark) {
  if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
    // When iterating intergenerational references during a scavenge we might
    // decide to promote an encountered young object. We will allocate space
    // for such an object and put it into the promotion queue to process it
    // later.
    // If space for the object was allocated somewhere beyond the allocation
    // watermark, this might cause garbage pointers to appear under the
    // allocation watermark. To avoid visiting them during the dirty-regions
    // iteration, which might still be in progress, we store a valid
    // allocation watermark value and mark this page as having an invalid
    // watermark.
    SetCachedAllocationWatermark(AllocationWatermark());
    InvalidateWatermark(true);
  }

  flags_ = (flags_ & kFlagsMask) |
           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
  ASSERT(AllocationWatermarkOffset()
         == static_cast<uint32_t>(Offset(allocation_watermark)));
}


void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
  mc_first_forwarded = allocation_watermark;
}


Address Page::CachedAllocationWatermark() {
  return mc_first_forwarded;
}


uint32_t Page::GetRegionMarks() {
  return dirty_regions_;
}


void Page::SetRegionMarks(uint32_t marks) {
  dirty_regions_ = marks;
}


int Page::GetRegionNumberForAddress(Address addr) {
  // Each page is divided into 256-byte regions. Each region has a
  // corresponding dirty mark bit in the page header. A region can contain
  // intergenerational references iff its dirty mark is set.
  // A normal 8K page contains exactly 32 regions, so all region marks fit
  // into a 32-bit integer field. To calculate a region number we just divide
  // the offset inside the page by the region size.
  // A large page can contain more than 32 regions. But we want to avoid
  // additional write barrier code for distinguishing between large and normal
  // pages, so we just ignore the fact that addr points into a large page and
  // calculate the region number as if addr pointed into a normal 8K page.
  // This way we get a region number modulo 32, so for large pages several
  // regions might be mapped to a single dirty mark.
  ASSERT_PAGE_ALIGNED(this->address());
  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

  // We are using masking with kPageAlignmentMask instead of Page::Offset()
  // to get an offset relative to the beginning of the 8K page containing
  // addr, not to the beginning of the actual page, which can be bigger
  // than 8K.
  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}
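
// Worked example, assuming the 8K pages and 256-byte regions described in
// the comment above (i.e. kRegionSizeLog2 == 8, kPageAlignmentMask == 0x1fff):
// an address at page offset 0x523 maps to region 0x523 >> 8 == 5. On a large
// page, an offset of 0x2123 is first masked down to 0x123 and therefore maps
// to region 1, i.e. the region number taken modulo 32.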


uint32_t Page::GetRegionMaskForAddress(Address addr) {
  return 1 << GetRegionNumberForAddress(addr);
}


uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
  uint32_t result = 0;
  static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
  if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
    result = kAllRegionsDirtyMarks;
  } else if (length_in_bytes > 0) {
    int start_region = GetRegionNumberForAddress(start);
    int end_region =
        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
    uint32_t start_mask = (~0) << start_region;
    uint32_t end_mask = ~((~1) << end_region);
    result = start_mask & end_mask;
    // If end_region < start_region, the two masks are OR'ed instead.
    if (result == 0) result = start_mask | end_mask;
  }
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    uint32_t expected = 0;
    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
      expected |= GetRegionMaskForAddress(a);
    }
    ASSERT(expected == result);
  }
#endif
  return result;
}
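
// Worked example: for a span covering regions 3..5, start_mask is (~0) << 3
// (bits 3..31) and end_mask is ~((~1) << 5) (bits 0..5), so the result covers
// bits 3..5. If a span on a large page wraps so that end_region < start_region
// (say start_region == 5 and end_region == 3), the intersection is empty and
// the two masks are OR'ed instead, marking bits 0..3 and 5..31.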


void Page::MarkRegionDirty(Address address) {
  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}


bool Page::IsRegionDirty(Address address) {
  return GetRegionMarks() & GetRegionMaskForAddress(address);
}


void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
  int rstart = GetRegionNumberForAddress(start);
  int rend = GetRegionNumberForAddress(end);

  if (reaches_limit) {
    end += 1;
  }

  if ((rend - rstart) == 0) {
    return;
  }

  uint32_t bitmask = 0;

  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
      || (start == ObjectAreaStart())) {
    // First region is fully covered.
    bitmask = 1 << rstart;
  }

  while (++rstart < rend) {
    bitmask |= 1 << rstart;
  }

  if (bitmask) {
    SetRegionMarks(GetRegionMarks() & ~bitmask);
  }
}


void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
  heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}
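
// A page's watermark counts as valid when its WATERMARK_INVALIDATED flag bit
// differs from the heap-wide page_watermark_invalidated_mark_ (see
// IsWatermarkValid below). Flipping the heap-wide mark therefore inverts that
// interpretation for every page at once: pages invalidated against the old
// mark become valid again without touching the pages themselves.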


bool Page::IsWatermarkValid() {
  return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
      heap_->page_watermark_invalidated_mark_;
}


void Page::InvalidateWatermark(bool value) {
  if (value) {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             heap_->page_watermark_invalidated_mark_;
  } else {
    flags_ =
        (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
        (heap_->page_watermark_invalidated_mark_ ^
         (1 << WATERMARK_INVALIDATED));
  }

  ASSERT(IsWatermarkValid() == !value);
}


bool Page::GetPageFlag(PageFlag flag) {
  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags_ |= static_cast<intptr_t>(1 << flag);
  } else {
    flags_ &= ~static_cast<intptr_t>(1 << flag);
  }
}


void Page::ClearPageFlags() {
  flags_ = 0;
}


void Page::ClearGCFields() {
  InvalidateWatermark(true);
  SetAllocationWatermark(ObjectAreaStart());
  if (heap_->gc_state() == Heap::SCAVENGE) {
    SetCachedAllocationWatermark(ObjectAreaStart());
  }
  SetRegionMarks(kAllRegionsCleanMarks);
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}

bool Page::IsPageExecutable() {
  return GetPageFlag(IS_EXECUTABLE);
}


void Page::SetIsPageExecutable(bool is_page_executable) {
  SetPageFlag(IS_EXECUTABLE, is_page_executable);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
  address_ = a;
  size_ = s;
  owner_ = o;
  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
  owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
}


bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
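
// The opaque_header word, as used by GetNextPage, GetChunkId and SetNextPage
// above, encodes two values: the low bits (within Page::kPageAlignmentMask)
// hold the id of the chunk the page belongs to, and the remaining high bits
// hold the page-aligned address of the next page in the space.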


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g., move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}
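
// AllocateRaw above and MCAllocateRaw below follow the same pattern: try a
// bump-pointer allocation via AllocateLinearly first, fall back to the
// space's slow path (e.g. free list allocation or moving to another page),
// and if that also fails return a retry-after-GC failure so the caller can
// trigger a collection and retry.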


// Reallocating (and promoting) objects during a compacting collection.
MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

Address LargeObjectChunk::GetStartAddress() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return page->ObjectAreaStart();
}


void LargeObjectChunk::Free(Executability executable) {
  Isolate* isolate =
      Page::FromAddress(RoundUp(address(), Page::kPageSize))->heap_->isolate();
  isolate->memory_allocator()->FreeRawMemory(address(), size(), executable);
}

// -----------------------------------------------------------------------------
// NewSpace

MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
                                           AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


intptr_t LargeObjectSpace::Available() {
  return LargeObjectChunk::ObjectSizeFor(
      heap()->isolate()->memory_allocator()->Available());
}


template <typename StringType>
void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
  ASSERT(length <= string->length());
  ASSERT(string->IsSeqString());
  ASSERT(string->address() + StringType::SizeFor(string->length()) ==
         allocation_info_.top);
  allocation_info_.top =
      string->address() + StringType::SizeFor(length);
  string->set_length(length);
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == HEAP->raw_unchecked_byte_array_map()
      || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
      || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_