// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "memory.h"
#include "spaces.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
               ? space_->first_page_
               : prev_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  return owner->PageAllocationTop(this);
}


void Page::ClearRSet() {
  // This method can be called in all rset states.
  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
}

// Given a 32-bit address, separate its bits into:
// | page address | words (6) | bit offset (5) | pointer alignment (2) |
// The address of the rset word containing the bit for this word is computed as:
//   page_address + words * 4
// For a 64-bit address, if it is:
// | page address | words (5) | bit offset (5) | pointer alignment (3) |
// The address of the rset word containing the bit for this word is computed as:
//   page_address + words * 4 + kRSetOffset.
// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
// even on the X64 architecture.
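//
// A worked example (illustrative only; the constants are assumed for a
// 32-bit build with kPointerSizeLog2 == 2, kBitsPerInt == 32 and
// kIntSize == 4, not read from this file): a word at page offset 0x123C
// gives
//   bit_offset = 0x123C >> 2 = 1167
//   *bitmask   = 1 << (1167 % 32) = 1 << 15
// and its rset word lives at
//   page_address + kRSetOffset + (1167 / 32) * 4
//     = page_address + kRSetOffset + 144.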

Address Page::ComputeRSetBitPosition(Address address, int offset,
                                     uint32_t* bitmask) {
  ASSERT(Page::is_rset_in_use());

  Page* page = Page::FromAddress(address);
  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
                                             kPointerSizeLog2);
  *bitmask = 1 << (bit_offset % kBitsPerInt);

  Address rset_address =
      page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
  // The remembered set address is either in the normal remembered set range
  // of a page or else we have a large object page.
  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
         || page->IsLargeObjectPage());

  if (rset_address >= page->RSetEnd()) {
    // We have a large object page, and the remembered set address is actually
    // past the end of the object.

    // The first part of the remembered set is still located at the start of
    // the page, but anything after kRSetEndOffset must be relocated to after
    // the large object, i.e. after
    //   (page->ObjectAreaStart() + object size)
    // We do that by adding the difference between the normal RSet's end and
    // the object's end.
    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
    int fixedarray_length =
        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
                                           + Array::kLengthOffset));
    rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
  }
  return rset_address;
}


void Page::SetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) |= bitmask;

  ASSERT(IsRSetSet(address, offset));
}


// Clears the corresponding remembered set bit for a given address.
void Page::UnsetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  Memory::uint32_at(rset_address) &= ~bitmask;

  ASSERT(!IsRSetSet(address, offset));
}


bool Page::IsRSetSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
  return (Memory::uint32_at(rset_address) & bitmask) != 0;
}


bool Page::GetPageFlag(PageFlag flag) {
  return (flags & flag) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags |= flag;
  } else {
    flags &= ~flag;
  }
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


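// A page's opaque_header packs two values: the low Page::kPageAlignmentMask
// bits hold the id of the chunk the page belongs to, while the remaining
// high bits hold the page-aligned address of the next page in the same
// space.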
Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// --------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  ASSERT(p->is_valid());

  return MemoryAllocator::IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow case logic (e.g., move to the next page or try free list
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}


// Raw allocation.
Object* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}
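
// A minimal sketch of the expected caller pattern (illustrative, not code
// from this file): raw allocation can return a retry-after-GC failure
// instead of a heap object, so callers are expected to check for it, e.g.
//
//   Object* result = space->AllocateRaw(size_in_bytes);
//   if (result->IsFailure()) {
//     // Collect garbage for this space and retry, or propagate the
//     // failure to the caller.
//   }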


// Reallocating (and promoting) objects during a compacting collection.
Object* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(size_in_bytes, identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}


// -----------------------------------------------------------------------------
// LargeObjectSpace

int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
  int extra_rset_bits =
      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
              kBitsPerInt);
  return extra_rset_bits / kBitsPerByte;
}
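
// For illustration (assumed constants, not read from this file): with
// kPointerSize == 4, kBitsPerInt == 32, kBitsPerByte == 8 and a
// hypothetical Page::kObjectAreaSize of 8192 bytes, an object of 20000
// bytes needs (20000 - 8192) / 4 = 2952 extra rset bits, which round up
// to 2976 bits, i.e. 2976 / 8 = 372 extra rset bytes.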


Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                      AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


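// A free-list node is a dead region of the heap disguised as an ordinary
// heap object; it is recognizable purely by its map, which is either the
// byte array map or one of the one-/two-pointer filler maps.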
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == Heap::raw_unchecked_byte_array_map()
      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_