// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "memory.h"
#include "spaces.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// HeapObjectIterator

bool HeapObjectIterator::has_next() {
  if (cur_addr_ < cur_limit_) {
    return true;  // common case
  }
  ASSERT(cur_addr_ == cur_limit_);
  return HasNextInNextPage();  // slow path
}


HeapObject* HeapObjectIterator::next() {
  ASSERT(has_next());

  HeapObject* obj = HeapObject::FromAddress(cur_addr_);
  int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
  ASSERT_OBJECT_SIZE(obj_size);

  cur_addr_ += obj_size;
  ASSERT(cur_addr_ <= cur_limit_);

  return obj;
}
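
// Usage sketch (illustrative only; the constructor overloads are declared in
// spaces.h):
//
//   HeapObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* obj = it.next();
//     // ... visit obj ...
//   }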


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
      ? space_->first_page_
      : prev_page_->next_page();
  return prev_page_;
}
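
// Usage sketch (illustrative only): visiting every page of a paged space.
// The exact constructor arguments (e.g. an iteration mode such as
// PAGES_IN_USE) are declared in spaces.h.
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }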


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  return owner->PageAllocationTop(this);
}


void Page::ClearRSet() {
  // This method can be called in all rset states.
  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}


// A 32-bit address is separated into these bit fields:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed
// as:
//    page_address + words * 4
// A 64-bit address is separated into:
// | page address | words (5) | bit offset (5) | pointer alignment (3) |
// and the address of the rset word containing the bit for this word is
// computed as:
//    page_address + words * 4 + kRSetOffset.
// The rset is always accessed as 32-bit words, with bit offsets within a
// 32-bit word, even on the X64 architecture.
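//
// Worked example (32-bit layout, illustrative only): for a slot at page
// offset 0x1230 with offset 0, bit_offset = 0x1230 >> kPointerSizeLog2 =
// 1164, so the bit lives in rset word 1164 / 32 = 36 at bit position
// 1164 % 32 = 12; ComputeRSetBitPosition below returns
// page_address + kRSetOffset + 36 * kIntSize and sets *bitmask to 1 << 12.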

Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kPointerSizeLog2);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  Address rset_address =
      page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
  // The remembered set address is either in the normal remembered set range
  // of a page or else we have a large object page.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // We have a large object page, and the remembered set address is actually
    // past the end of the object.

    // The first part of the remembered set is still located at the start of
    // the page, but anything after kRSetEndOffset must be relocated to after
    // the large object, i.e. after
    //   (page->ObjectAreaStart() + object size).
    // We do that by adding the difference between the normal RSet's end and
    // the object's end.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    int fixedarray_length =
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
    rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
  }
  return rset_address;
}


void Page::SetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) |= bitmask;

  ASSERT(IsRSetSet(address, offset));
}


// Clears the corresponding remembered set bit for a given address.
void Page::UnsetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) &= ~bitmask;

  ASSERT(!IsRSetSet(address, offset));
}


bool Page::IsRSetSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  return (Memory::uint32_at(rset_address) & bitmask) != 0;
}
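
// Usage sketch (illustrative only): recording that the field at byte offset
// 'offset' inside the object at 'addr' may hold a pointer into new space,
// then testing and clearing the corresponding bit.
//
//   Page::SetRSet(addr, offset);
//   ASSERT(Page::IsRSetSet(addr, offset));
//   Page::UnsetRSet(addr, offset);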


// -----------------------------------------------------------------------------
// MemoryAllocator

bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return p->opaque_header & Page::kPageAlignmentMask;
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
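
// The opaque_header word written above packs two values: the page-aligned
// address of the next page in the high bits and the chunk id in the low bits
// freed up by page alignment. Decoding, as done by GetNextPage/GetChunkId
// above:
//
//   next_address = opaque_header & ~Page::kPageAlignmentMask;
//   chunk_id     = opaque_header & Page::kPageAlignmentMask;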


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  ASSERT(p->is_valid());

  return MemoryAllocator::IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to the next page or try free list
// allocation), so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}
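
// Illustrative trace of the bump-pointer fast path above: with top = 0x1000
// and limit = 0x2000, a request for 32 bytes returns the object at 0x1000
// and advances top to 0x1020; a request that would push top past limit
// returns NULL so the caller can fall back to the slow path.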


// Raw allocation.
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// Reallocating (and promoting) objects during a compacting collection.
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}
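
// Usage sketch (illustrative only): callers must be prepared to receive a
// retry-after-GC failure instead of a heap object.
//
//   Object* result = space->AllocateRaw(size_in_bytes);
//   if (result->IsFailure()) {
//     // Trigger a garbage collection (or report OOM) and retry.
//   }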


// -----------------------------------------------------------------------------
// LargeObjectChunk

HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}


// -----------------------------------------------------------------------------
// LargeObjectSpace

int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}
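
// Worked example (32-bit, illustrative only): if object_size exceeds
// Page::kObjectAreaSize by 40000 bytes, that is 10000 extra words, which
// rounds up to 10016 rset bits (the next multiple of kBitsPerInt = 32),
// i.e. 10016 / 8 = 1252 extra remembered set bytes.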


Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == Heap::raw_unchecked_byte_array_map()
      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}
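
// Free-list nodes masquerade as byte arrays or one-/two-pointer filler
// objects so that heap iteration only ever sees validly-mapped, sized
// objects. Usage sketch (illustrative only):
//
//   HeapObject* obj = it.next();
//   if (FreeListNode::IsFreeListNode(obj)) {
//     // Skip free space between live objects.
//   }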

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_