// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_

#include "memory.h"
#include "spaces.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// PageIterator

bool PageIterator::has_next() {
  return prev_page_ != stop_page_;
}


Page* PageIterator::next() {
  ASSERT(has_next());
  prev_page_ = (prev_page_ == NULL)
      ? space_->first_page_
      : prev_page_->next_page();
  return prev_page_;
}


// -----------------------------------------------------------------------------
// Page

Page* Page::next_page() {
  return MemoryAllocator::GetNextPage(this);
}


Address Page::AllocationTop() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  return owner->PageAllocationTop(this);
}


Address Page::AllocationWatermark() {
  PagedSpace* owner = MemoryAllocator::PageOwner(this);
  if (this == owner->AllocationTopPage()) {
    return owner->top();
  }
  return address() + AllocationWatermarkOffset();
}


uint32_t Page::AllocationWatermarkOffset() {
  return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
                               kAllocationWatermarkOffsetShift);
}


void Page::SetAllocationWatermark(Address allocation_watermark) {
  if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
    // When iterating over intergenerational references during a scavenge
    // we might decide to promote an encountered young object.
    // We will allocate space for such an object and put it
    // into the promotion queue to process it later.
    // If space for the object was allocated somewhere beyond the allocation
    // watermark, this might cause garbage pointers to appear under the
    // allocation watermark. To avoid visiting them during the dirty-region
    // iteration, which might still be in progress, we store a valid
    // allocation watermark value and mark this page as having an invalid
    // watermark.
    SetCachedAllocationWatermark(AllocationWatermark());
    InvalidateWatermark(true);
  }

  flags_ = (flags_ & kFlagsMask) |
           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
  ASSERT(AllocationWatermarkOffset()
         == static_cast<uint32_t>(Offset(allocation_watermark)));
}
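
// Layout sketch for flags_ (as implied by the masks above): the low bits,
// selected by kFlagsMask, hold the page flag bits, while the bits at and
// above kAllocationWatermarkOffsetShift cache the allocation watermark
// offset. SetAllocationWatermark() therefore keeps the flag bits intact
// and rewrites only the offset part:
//
//   flags_: [ watermark offset | page flags ]
//                                ^-- the part covered by kFlagsMask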


void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
  mc_first_forwarded = allocation_watermark;
}


Address Page::CachedAllocationWatermark() {
  return mc_first_forwarded;
}


uint32_t Page::GetRegionMarks() {
  return dirty_regions_;
}


void Page::SetRegionMarks(uint32_t marks) {
  dirty_regions_ = marks;
}


int Page::GetRegionNumberForAddress(Address addr) {
  // Each page is divided into 256-byte regions. Each region has a
  // corresponding dirty mark bit in the page header. A region can contain
  // intergenerational references iff its dirty mark is set.
  // A normal 8K page contains exactly 32 regions, so all region marks fit
  // into a 32-bit integer field. To calculate a region number we just divide
  // the offset inside the page by the region size.
  // A large page can contain more than 32 regions. But we want to avoid
  // additional write barrier code for distinguishing between large and normal
  // pages, so we just ignore the fact that addr points into a large page and
  // calculate the region number as if addr pointed into a normal 8K page.
  // This way we get a region number modulo 32, so for large pages several
  // regions might be mapped to a single dirty mark.
  ASSERT_PAGE_ALIGNED(this->address());
  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);

  // We are using masking with kPageAlignmentMask instead of Page::Offset()
  // to get the offset to the beginning of the 8K page containing addr, not
  // to the beginning of the actual page, which can be bigger than 8K.
  intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
  return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
}
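
// A worked example (using the 256-byte regions and 8K pages described
// above): an address at offset 0x0A34 inside its page falls into region
// 0x0A34 >> kRegionSizeLog2 == 0x0A34 / 256 == 10. For an address 0x2A34
// bytes into a large page, masking with kPageAlignmentMask (0x1FFF for 8K
// pages) yields 0x0A34 again, i.e. the same region number modulo 32.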


uint32_t Page::GetRegionMaskForAddress(Address addr) {
  return 1 << GetRegionNumberForAddress(addr);
}


uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
  uint32_t result = 0;
  if (length_in_bytes >= kPageSize) {
    result = kAllRegionsDirtyMarks;
  } else if (length_in_bytes > 0) {
    int start_region = GetRegionNumberForAddress(start);
    int end_region =
        GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
    uint32_t start_mask = (~0) << start_region;
    uint32_t end_mask = ~((~1) << end_region);
    result = start_mask & end_mask;
    // If end_region < start_region the span wraps around (region numbers
    // are taken modulo 32, see GetRegionNumberForAddress), so the two
    // masks are OR'ed instead.
    if (result == 0) result = start_mask | end_mask;
  }
#ifdef DEBUG
  if (FLAG_enable_slow_asserts) {
    uint32_t expected = 0;
    for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
      expected |= GetRegionMaskForAddress(a);
    }
    ASSERT(expected == result);
  }
#endif
  return result;
}
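
// A worked example (illustrative, with 32 regions per mark word): for a
// span covering regions 3..5, start_mask = ~0 << 3 sets bits 3 and up,
// end_mask = ~(~1 << 5) sets bits 0..5, and their intersection sets
// exactly bits 3..5. For a wrapped span with start_region == 30 and
// end_region == 1, the intersection is empty, so the union is used
// instead: bits 30, 31, 0 and 1.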


void Page::MarkRegionDirty(Address address) {
  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
}


bool Page::IsRegionDirty(Address address) {
  return GetRegionMarks() & GetRegionMaskForAddress(address);
}


void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
  int rstart = GetRegionNumberForAddress(start);
  int rend = GetRegionNumberForAddress(end);

  if (reaches_limit) {
    end += 1;
  }

  if ((rend - rstart) == 0) {
    return;
  }

  uint32_t bitmask = 0;

  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
      || (start == ObjectAreaStart())) {
    // The first region is fully covered.
    bitmask = 1 << rstart;
  }

  while (++rstart < rend) {
    bitmask |= 1 << rstart;
  }

  if (bitmask) {
    SetRegionMarks(GetRegionMarks() & ~bitmask);
  }
}
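
// A sketch of the clearing rule above: only regions completely covered by
// [start, end) lose their dirty marks. The first region is included only
// when start is region-aligned (or is the start of the object area), and
// the loop stops short of rend, so a partially covered last region keeps
// its mark.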


void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
  watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
}


bool Page::IsWatermarkValid() {
  return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_;
}


void Page::InvalidateWatermark(bool value) {
  if (value) {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             watermark_invalidated_mark_;
  } else {
    flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
             (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED));
  }

  ASSERT(IsWatermarkValid() == !value);
}
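
// Note on the flip trick implied by the three functions above: a page's
// watermark counts as invalid when its WATERMARK_INVALIDATED bit equals
// the global watermark_invalidated_mark_. Flipping the global mark in
// FlipMeaningOfInvalidatedWatermarkFlag() therefore revalidates every
// invalidated page at once, in constant time, without touching any
// individual page's flags_.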


bool Page::GetPageFlag(PageFlag flag) {
  return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
}


void Page::SetPageFlag(PageFlag flag, bool value) {
  if (value) {
    flags_ |= static_cast<intptr_t>(1 << flag);
  } else {
    flags_ &= ~static_cast<intptr_t>(1 << flag);
  }
}


void Page::ClearPageFlags() {
  flags_ = 0;
}


void Page::ClearGCFields() {
  InvalidateWatermark(true);
  SetAllocationWatermark(ObjectAreaStart());
  if (Heap::gc_state() == Heap::SCAVENGE) {
    SetCachedAllocationWatermark(ObjectAreaStart());
  }
  SetRegionMarks(kAllRegionsCleanMarks);
}


bool Page::WasInUseBeforeMC() {
  return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}


void Page::SetWasInUseBeforeMC(bool was_in_use) {
  SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
}


bool Page::IsLargeObjectPage() {
  return !GetPageFlag(IS_NORMAL_PAGE);
}


void Page::SetIsLargeObjectPage(bool is_large_object_page) {
  SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
}


bool Page::IsPageExecutable() {
  return GetPageFlag(IS_EXECUTABLE);
}


void Page::SetIsPageExecutable(bool is_page_executable) {
  SetPageFlag(IS_EXECUTABLE, is_page_executable);
}


// -----------------------------------------------------------------------------
// MemoryAllocator

void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
  address_ = a;
  size_ = s;
  owner_ = o;
  executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
}


bool MemoryAllocator::IsValidChunk(int chunk_id) {
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
}


bool MemoryAllocator::IsValidChunkId(int chunk_id) {
  return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
}


bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
  ASSERT(p->is_valid());

  int chunk_id = GetChunkId(p);
  if (!IsValidChunkId(chunk_id)) return false;

  ChunkInfo& c = chunks_[chunk_id];
  return (c.address() <= p->address()) &&
         (p->address() < c.address() + c.size()) &&
         (space == c.owner());
}


Page* MemoryAllocator::GetNextPage(Page* p) {
  ASSERT(p->is_valid());
  intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
  return Page::FromAddress(AddressFrom<Address>(raw_addr));
}


int MemoryAllocator::GetChunkId(Page* p) {
  ASSERT(p->is_valid());
  return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
}


void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
  ASSERT(prev->is_valid());
  int chunk_id = GetChunkId(prev);
  ASSERT_PAGE_ALIGNED(next->address());
  prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
}
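
// opaque_header packs two values into one word, as the three functions
// above show: the next page's address occupies the bits above
// kPageAlignmentMask (page addresses are page-aligned, so their low bits
// are zero), and the chunk id lives in the low bits. For example, with 8K
// pages, a next page at 0x2a004000 and chunk id 3 give
// opaque_header == 0x2a004003.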


PagedSpace* MemoryAllocator::PageOwner(Page* page) {
  int chunk_id = GetChunkId(page);
  ASSERT(IsValidChunk(chunk_id));
  return chunks_[chunk_id].owner();
}


bool MemoryAllocator::InInitialChunk(Address address) {
  if (initial_chunk_ == NULL) return false;

  Address start = static_cast<Address>(initial_chunk_->address());
  return (start <= address) && (address < start + initial_chunk_->size());
}


#ifdef ENABLE_HEAP_PROTECTION

void MemoryAllocator::Protect(Address start, size_t size) {
  OS::Protect(start, size);
}


void MemoryAllocator::Unprotect(Address start,
                                size_t size,
                                Executability executable) {
  OS::Unprotect(start, size, executable);
}


void MemoryAllocator::ProtectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Protect(chunks_[id].address(), chunks_[id].size());
}


void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
  int id = GetChunkId(page);
  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
                chunks_[id].owner()->executable() == EXECUTABLE);
}

#endif


// -----------------------------------------------------------------------------
// PagedSpace

bool PagedSpace::Contains(Address addr) {
  Page* p = Page::FromAddress(addr);
  if (!p->is_valid()) return false;
  return MemoryAllocator::IsPageInSpace(p, this);
}


// Try linear allocation in the page of alloc_info's allocation top. Does
// not contain slow-case logic (e.g., moving to the next page or trying
// free-list allocation), so it can be used by all the allocation functions
// and for all the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
                                         int size_in_bytes) {
  Address current_top = alloc_info->top;
  Address new_top = current_top + size_in_bytes;
  if (new_top > alloc_info->limit) return NULL;

  alloc_info->top = new_top;
  ASSERT(alloc_info->VerifyPagedAllocation());
  accounting_stats_.AllocateBytes(size_in_bytes);
  return HeapObject::FromAddress(current_top);
}
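
// Bump-pointer allocation in a nutshell: with, say, top == 0x1000 and
// limit == 0x2000, a request for 0x20 bytes returns the object at 0x1000
// and advances top to 0x1020; a request that would push top past limit
// returns NULL and leaves the slow path to the caller.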


// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}
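
// A sketch of the caller-side protocol this implies (a hypothetical
// caller, not code from this file): the returned MaybeObject is either a
// real HeapObject or a retry-after-GC failure tagged with this space's
// identity, so a caller is expected to do something like
//
//   MaybeObject* maybe = space->AllocateRaw(size);
//   Object* result;
//   if (!maybe->ToObject(&result)) {
//     // Collect garbage in the failing space, then retry the allocation.
//   }
//
// rather than assume the allocation always succeeds.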


// Reallocating (and promoting) objects during a compacting collection.
MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
  ASSERT(HasBeenSetup());
  ASSERT_OBJECT_SIZE(size_in_bytes);
  HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
  if (object != NULL) return object;

  object = SlowMCAllocateRaw(size_in_bytes);
  if (object != NULL) return object;

  return Failure::RetryAfterGC(identity());
}


// -----------------------------------------------------------------------------
// LargeObjectChunk

HeapObject* LargeObjectChunk::GetObject() {
  // Round the chunk address up to the nearest page-aligned address
  // and return the heap object in that page.
  Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
  return HeapObject::FromAddress(page->ObjectAreaStart());
}


// -----------------------------------------------------------------------------
// NewSpace

MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
                                           AllocationInfo* alloc_info) {
  Address new_top = alloc_info->top + size_in_bytes;
  if (new_top > alloc_info->limit) return Failure::RetryAfterGC();

  Object* obj = HeapObject::FromAddress(alloc_info->top);
  alloc_info->top = new_top;
#ifdef DEBUG
  SemiSpace* space =
      (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
  ASSERT(space->low() <= alloc_info->top
         && alloc_info->top <= space->high()
         && alloc_info->limit == space->high());
#endif
  return obj;
}


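// Free memory on the free list is disguised as ordinary heap objects: a
// free-list node carries the map of a byte array or of a one- or
// two-pointer filler, which is what the check below relies on. This keeps
// the heap iterable even across free gaps.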
bool FreeListNode::IsFreeListNode(HeapObject* object) {
  return object->map() == Heap::raw_unchecked_byte_array_map()
      || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
      || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
}

} }  // namespace v8::internal

#endif  // V8_SPACES_INL_H_