// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that holds the
// address of the next page and its ownership information. The second word may
// hold the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bits encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer-
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to map space, which is iterated in
// a special fashion. However, we still require pointer fields of dead maps to
// be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page should reside under
// the allocation watermark. During a scavenge the allocation watermark might
// be bumped and invalid pointers might appear below it. To avoid following
// them we store a valid watermark into a special field in the page header and
// set the page's WATERMARK_INVALIDATED flag. For details see the comments in
// the Page::SetAllocationWatermark() method body.
//

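// Illustrative sketch (not part of the original source): with the region
// constants declared on class Page below (one dirty bit per kRegionSize-byte
// region, 32 bits per page), the dirty mark covering an address is plausibly
// derived along the following lines; the authoritative encoding lives in
// Page::GetRegionNumberForAddress() in spaces-inl.h:
//
//   Page* page = Page::FromAddress(addr);
//   int region = static_cast<int>(
//       (OffsetFrom(addr) & Page::kPageAlignmentMask) >> Page::kRegionSizeLog2);
//   uint32_t mask = static_cast<uint32_t>(1) << region;
//   bool dirty = (page->GetRegionMarks() & mask) != 0;
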
// Some assertion macros used in debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))

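// Illustrative usage sketch (not part of the original source): these macros
// are typically applied to page-derived values, e.g.
//
//   ASSERT_PAGE_ALIGNED(page->address());
//   ASSERT_PAGE_OFFSET(page->Offset(allocation_top));
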

class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we subtract kPointerSize
  // from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before current compaction started.
  // Result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset within this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize;

  // The start offset of the object area in a page. Aligned to both map and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // Page allocation watermark was bumped by preallocation during scavenge.
    // The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge, and each page becomes marked
  // as having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  // if a page is in use, then it is marked as having an invalid watermark at
  // the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag();

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
      kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);

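  // Illustrative sketch (not part of the original source): given the
  // constants above, the allocation watermark offset is plausibly packed into
  // the upper bits of flags_ roughly as follows; the authoritative code is
  // AllocationWatermarkOffset() in spaces-inl.h:
  //
  //   uint32_t offset =
  //       (static_cast<uint32_t>(flags_) & kAllocationWatermarkOffsetMask) >>
  //       kAllocationWatermarkOffsetShift;
  //   Address watermark = address() + offset;
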
  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
  // Instead of clearing this flag from all pages we just flip
  // its meaning at the beginning of a scavenge.
  static intptr_t watermark_invalidated_mark_;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding
  // address of the first live object in this page.
  // During scavenge collections this field is used to store the allocation
  // watermark if it is altered during scavenge.
  Address mc_first_forwarded;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};
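
// Illustrative usage sketch (not part of the original source; 'requested' is
// a hypothetical size): callers first check for a code range and then carve
// executable memory out of it, roughly as follows:
//
//   if (CodeRange::exists()) {
//     size_t allocated = 0;
//     void* base = CodeRange::AllocateRawMemory(requested, &allocated);
//     // ... use [base, base + allocated) for code objects ...
//     CodeRange::FreeRawMemory(base, allocated);
//   }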


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expands by allocating kPagesPerChunk pages at a time, except possibly the
// last expansion (before running out of space). The first chunk may contain
// fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in the chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//

class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see comment for class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by a given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated,
                                                 Executability executable);
  static void FreeRawMemory(void* buf,
                            size_t length,
                            Executability executable);
  static void PerformAllocationCallback(ObjectSpace space,
                                        AllocationAction action,
                                        size_t size);

  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action);
  static void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);
  static bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  static intptr_t Available() {
    return capacity_ < size_ ? 0 : capacity_ - size_;
  }

  // Returns allocated spaces in bytes.
  static intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  static intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  static intptr_t SizeExecutable() { return size_executable_; }

  // Returns the maximum available bytes that the old space can have.
  static intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by a space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
#else
  static const int kPagesPerChunk = 16;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  static intptr_t capacity_executable_;

  // Allocated space size in bytes.
  static intptr_t size_;
  // Allocated executable space size in bytes.
  static intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  static List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
  };

  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};

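// Illustrative lifecycle sketch (not part of the original source; the sizes
// and the owning space are hypothetical): the allocator is set up once, hands
// out page-aligned chunks to paged spaces, and is torn down with the heap:
//
//   MemoryAllocator::Setup(max_capacity, capacity_executable);
//   void* chunk = MemoryAllocator::ReserveInitialChunk(requested);
//   int num_pages = 0;
//   Page* first_page = MemoryAllocator::CommitPages(
//       static_cast<Address>(chunk), size, owner, &num_pages);
//   // ... allocate from the pages ...
//   MemoryAllocator::TearDown();
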

// -----------------------------------------------------------------------------
// Interface for heap object iterators to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating over
// a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next, goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};

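// Illustrative usage sketch (not part of the original source; it assumes
// next() returns NULL once the iteration is exhausted):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... process obj ...
//   }
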

// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};

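// Illustrative usage sketch (not part of the original source; it mirrors the
// pattern used by PagedSpace::CountPagesToTop() below):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }
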

// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Available() { return available_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(intptr_t size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t available_;
  intptr_t size_;
  intptr_t waste_;
};

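// Illustrative example (not part of the original source): the methods above
// preserve the identity Capacity() == Size() + Waste() + Available().
// For instance, after
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity == available == N
//   stats.AllocateBytes(128);                  // size == 128
//   stats.WasteBytes(32);                      // waste == 32, available == N - 160
//
// the identity still holds, since every mutator moves bytes between the
// non-capacity counters or adjusts them together with capacity_.
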
class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
                                                : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  intptr_t Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Used by ReserveSpace.
  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;

  // Free all pages in range from prev (exclusive) to last (inclusive).
  // Freed pages are moved to the end of page list.
  void FreePages(Page* prev, Page* last);

  // Deallocates a block.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) = 0;

  // Set space allocation info.
  void SetTop(Address top) {
    allocation_info_.top = top;
    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
  }

  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->
        SetAllocationWatermark(mc_forwarding_info_.top);
  }

  // Computes the offset of a given address in this space from the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of the unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  void RelinkPageListInChunkOrder(bool deallocate_blocks);

Steve Blocka7e24c12009-10-30 11:49:00 +00001156 protected:
1157 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001158 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001159
1160 // Accounting information for this space.
1161 AllocationStats accounting_stats_;
1162
1163 // The first page in this space.
1164 Page* first_page_;
1165
1166 // The last page in this space. Initially set in Setup, updated in
1167 // Expand and Shrink.
1168 Page* last_page_;
1169
Steve Block6ded16b2010-05-10 14:33:55 +01001170 // True if pages owned by this space are linked in chunk-order.
1171 // See comment for class MemoryAllocator for definition of chunk-order.
1172 bool page_list_is_chunk_ordered_;
1173
Steve Blocka7e24c12009-10-30 11:49:00 +00001174 // Normal allocation information.
1175 AllocationInfo allocation_info_;
1176
1177 // Relocation information during mark-compact collections.
1178 AllocationInfo mc_forwarding_info_;
1179
1180 // Bytes of each page that cannot be allocated. Possibly non-zero
1181 // for pages in spaces with only fixed-size objects. Always zero
1182 // for pages in spaces with variable sized objects (those pages are
1183 // padded with free-list nodes).
1184 int page_extra_;
1185
1186 // Sets allocation pointer to a page bottom.
1187 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1188
1189 // Returns the top page specified by an allocation info structure.
1190 static Page* TopPageOf(AllocationInfo alloc_info) {
1191 return Page::FromAllocationTop(alloc_info.limit);
1192 }
1193
Leon Clarked91b9f72010-01-27 17:25:45 +00001194 int CountPagesToTop() {
1195 Page* p = Page::FromAllocationTop(allocation_info_.top);
1196 PageIterator it(this, PageIterator::ALL_PAGES);
1197 int counter = 1;
1198 while (it.has_next()) {
1199 if (it.next() == p) return counter;
1200 counter++;
1201 }
1202 UNREACHABLE();
1203 return -1;
1204 }
1205
Steve Blocka7e24c12009-10-30 11:49:00 +00001206 // Expands the space by allocating a fixed number of pages. Returns false if
1207  // it cannot allocate the requested number of pages from the OS. Newly
1208  // allocated pages are appended after last_page.
1209 bool Expand(Page* last_page);
1210
1211 // Generic fast case allocation function that tries linear allocation in
1212 // the top page of 'alloc_info'. Returns NULL on failure.
1213 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1214 int size_in_bytes);
1215
1216 // During normal allocation or deserialization, roll to the next page in
1217 // the space (there is assumed to be one) and allocate there. This
1218 // function is space-dependent.
1219 virtual HeapObject* AllocateInNextPage(Page* current_page,
1220 int size_in_bytes) = 0;
1221
1222 // Slow path of AllocateRaw. This function is space-dependent.
John Reck59135872010-11-02 12:39:01 -07001223 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001224
1225 // Slow path of MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001226 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001227
1228#ifdef DEBUG
Leon Clarkee46be812010-01-19 14:06:41 +00001229 // Returns the number of total pages in this space.
1230 int CountTotalPages();
Steve Blocka7e24c12009-10-30 11:49:00 +00001231#endif
1232 private:
Steve Blocka7e24c12009-10-30 11:49:00 +00001233
1234 // Returns a pointer to the page of the relocation pointer.
1235 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1236
Steve Blocka7e24c12009-10-30 11:49:00 +00001237 friend class PageIterator;
1238};
1239
1240
1241#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1242class NumberAndSizeInfo BASE_EMBEDDED {
1243 public:
1244 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1245
1246 int number() const { return number_; }
1247 void increment_number(int num) { number_ += num; }
1248
1249 int bytes() const { return bytes_; }
1250 void increment_bytes(int size) { bytes_ += size; }
1251
1252 void clear() {
1253 number_ = 0;
1254 bytes_ = 0;
1255 }
1256
1257 private:
1258 int number_;
1259 int bytes_;
1260};
1261
1262
1263// HistogramInfo class for recording a single "bar" of a histogram. This
1264// class is used for collecting statistics to print to stdout (when compiled
1265// with DEBUG) or to the log file (when compiled with
1266// ENABLE_LOGGING_AND_PROFILING).
1267class HistogramInfo: public NumberAndSizeInfo {
1268 public:
1269 HistogramInfo() : NumberAndSizeInfo() {}
1270
1271 const char* name() { return name_; }
1272 void set_name(const char* name) { name_ = name; }
1273
1274 private:
1275 const char* name_;
1276};
1277#endif
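
// A sketch (not part of the original header) of how a histogram "bar" is
// accumulated with NumberAndSizeInfo-style counters: each recorded object
// bumps the object count by one and the byte count by the object's size.
// The function name is hypothetical.
static inline void ExampleRecordObject(int object_size_in_bytes,
                                       int* number,
                                       int* bytes) {
  *number += 1;
  *bytes += object_size_in_bytes;
}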
1278
1279
1280// -----------------------------------------------------------------------------
1281// SemiSpace in young generation
1282//
1283// A semispace is a contiguous chunk of memory. The mark-compact collector
1284// uses the memory in the from space as a marking stack when tracing live
1285// objects.
1286
1287class SemiSpace : public Space {
1288 public:
1289 // Constructor.
1290 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1291 start_ = NULL;
1292 age_mark_ = NULL;
1293 }
1294
1295 // Sets up the semispace using the given chunk.
1296 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1297
1298 // Tear down the space. Heap memory was not allocated by the space, so it
1299 // is not deallocated here.
1300 void TearDown();
1301
1302 // True if the space has been set up but not torn down.
1303 bool HasBeenSetup() { return start_ != NULL; }
1304
1305 // Grow the size of the semispace by committing extra virtual memory.
1306 // Assumes that the caller has checked that the semispace has not reached
1307 // its maximum capacity (and thus there is space available in the reserved
1308 // address range to grow).
1309 bool Grow();
1310
1311 // Grow the semispace to the new capacity. The new capacity
1312 // requested must be larger than the current capacity.
1313 bool GrowTo(int new_capacity);
1314
1315 // Shrinks the semispace to the new capacity. The new capacity
1316 // requested must be more than the amount of used memory in the
1317 // semispace and less than the current capacity.
1318 bool ShrinkTo(int new_capacity);
1319
1320 // Returns the start address of the space.
1321 Address low() { return start_; }
1322 // Returns one past the end address of the space.
1323 Address high() { return low() + capacity_; }
1324
1325 // Age mark accessors.
1326 Address age_mark() { return age_mark_; }
1327 void set_age_mark(Address mark) { age_mark_ = mark; }
1328
1329 // True if the address is in the address range of this semispace (not
1330 // necessarily below the allocation pointer).
1331 bool Contains(Address a) {
1332 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1333 == reinterpret_cast<uintptr_t>(start_);
1334 }
1335
1336 // True if the object is a heap object in the address range of this
1337 // semispace (not necessarily below the allocation pointer).
1338 bool Contains(Object* o) {
1339 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1340 }
1341
1342 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001343 int SpaceOffsetForAddress(Address addr) {
1344 return static_cast<int>(addr - low());
1345 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001346
Leon Clarkee46be812010-01-19 14:06:41 +00001347  // If these were not defined here, SemiSpace would be abstract. However,
1348  // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001349 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001350 UNREACHABLE();
1351 return 0;
1352 }
1353
Leon Clarkee46be812010-01-19 14:06:41 +00001354 virtual bool ReserveSpace(int bytes) {
1355 UNREACHABLE();
1356 return false;
1357 }
1358
Steve Blocka7e24c12009-10-30 11:49:00 +00001359 bool is_committed() { return committed_; }
1360 bool Commit();
1361 bool Uncommit();
1362
Steve Block6ded16b2010-05-10 14:33:55 +01001363#ifdef ENABLE_HEAP_PROTECTION
1364 // Protect/unprotect the space by marking it read-only/writable.
1365 virtual void Protect() {}
1366 virtual void Unprotect() {}
1367#endif
1368
Steve Blocka7e24c12009-10-30 11:49:00 +00001369#ifdef DEBUG
1370 virtual void Print();
1371 virtual void Verify();
1372#endif
1373
1374 // Returns the current capacity of the semi space.
1375 int Capacity() { return capacity_; }
1376
1377 // Returns the maximum capacity of the semi space.
1378 int MaximumCapacity() { return maximum_capacity_; }
1379
1380 // Returns the initial capacity of the semi space.
1381 int InitialCapacity() { return initial_capacity_; }
1382
1383 private:
1384 // The current and maximum capacity of the space.
1385 int capacity_;
1386 int maximum_capacity_;
1387 int initial_capacity_;
1388
1389 // The start address of the space.
1390 Address start_;
1391 // Used to govern object promotion during mark-compact collection.
1392 Address age_mark_;
1393
1394 // Masks and comparison values to test for containment in this semispace.
1395 uintptr_t address_mask_;
1396 uintptr_t object_mask_;
1397 uintptr_t object_expected_;
1398
1399 bool committed_;
1400
1401 public:
1402 TRACK_MEMORY("SemiSpace")
1403};
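
// A sketch (not part of the original header) of the masked containment test
// used by SemiSpace::Contains.  It assumes the semispace occupies a
// power-of-two-sized address range aligned to its size, so clearing the low
// bits of any address inside the range yields the start address.  All names
// are hypothetical.
static inline bool ExampleSemiSpaceContains(uintptr_t address,
                                            uintptr_t space_start,
                                            uintptr_t space_size_pow2) {
  uintptr_t address_mask = ~(space_size_pow2 - 1);  // keeps only the high bits
  return (address & address_mask) == space_start;
}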
1404
1405
1406// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1407// semispace of the heap's new space. It iterates over the objects in the
1408// semispace from a given start address (defaulting to the bottom of the
1409// semispace) to the top of the semispace. New objects allocated after the
1410// iterator is created are not iterated.
1411class SemiSpaceIterator : public ObjectIterator {
1412 public:
1413 // Create an iterator over the objects in the given space. If no start
1414 // address is given, the iterator starts from the bottom of the space. If
1415 // no size function is given, the iterator calls Object::Size().
1416 explicit SemiSpaceIterator(NewSpace* space);
1417 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1418 SemiSpaceIterator(NewSpace* space, Address start);
1419
Steve Blocka7e24c12009-10-30 11:49:00 +00001420 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001421 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001422
1423 HeapObject* object = HeapObject::FromAddress(current_);
1424 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1425
1426 current_ += size;
1427 return object;
1428 }
1429
1430 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001431 virtual HeapObject* next_object() { return next(); }
1432
1433 private:
1434 void Initialize(NewSpace* space, Address start, Address end,
1435 HeapObjectCallback size_func);
1436
1437 // The semispace.
1438 SemiSpace* space_;
1439 // The current iteration point.
1440 Address current_;
1441 // The end of iteration.
1442 Address limit_;
1443 // The callback function.
1444 HeapObjectCallback size_func_;
1445};
1446
1447
1448// -----------------------------------------------------------------------------
1449// The young generation space.
1450//
1451// The new space consists of a contiguous pair of semispaces. It simply
1452// forwards most functions to the appropriate semispace.
1453
1454class NewSpace : public Space {
1455 public:
1456 // Constructor.
1457 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1458
1459 // Sets up the new space using the given chunk.
1460 bool Setup(Address start, int size);
1461
1462 // Tears down the space. Heap memory was not allocated by the space, so it
1463 // is not deallocated here.
1464 void TearDown();
1465
1466 // True if the space has been set up but not torn down.
1467 bool HasBeenSetup() {
1468 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1469 }
1470
1471 // Flip the pair of spaces.
1472 void Flip();
1473
1474 // Grow the capacity of the semispaces. Assumes that they are not at
1475 // their maximum capacity.
1476 void Grow();
1477
1478 // Shrink the capacity of the semispaces.
1479 void Shrink();
1480
1481 // True if the address or object lies in the address range of either
1482 // semispace (not necessarily below the allocation pointer).
1483 bool Contains(Address a) {
1484 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1485 == reinterpret_cast<uintptr_t>(start_);
1486 }
1487 bool Contains(Object* o) {
1488 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1489 }
1490
1491 // Return the allocated bytes in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001492 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
1493  // The same, but returning an int. The intptr_t version must exist because
1494  // it is inherited, but when we know we are dealing with the new space, which
1495  // cannot grow as large as the other spaces, the int version is more useful:
1496 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001497
Steve Blocka7e24c12009-10-30 11:49:00 +00001498 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001499 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001500 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1501 return to_space_.Capacity();
1502 }
Steve Block3ce2e202009-11-05 08:53:23 +00001503
1504 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001505 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00001506 if (from_space_.is_committed()) return 2 * Capacity();
1507 return Capacity();
1508 }
1509
Steve Blocka7e24c12009-10-30 11:49:00 +00001510 // Return the available bytes without growing in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001511 intptr_t Available() { return Capacity() - Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001512
1513 // Return the maximum capacity of a semispace.
1514 int MaximumCapacity() {
1515 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1516 return to_space_.MaximumCapacity();
1517 }
1518
1519 // Returns the initial capacity of a semispace.
1520 int InitialCapacity() {
1521 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1522 return to_space_.InitialCapacity();
1523 }
1524
1525 // Return the address of the allocation pointer in the active semispace.
1526 Address top() { return allocation_info_.top; }
1527 // Return the address of the first object in the active semispace.
1528 Address bottom() { return to_space_.low(); }
1529
1530 // Get the age mark of the inactive semispace.
1531 Address age_mark() { return from_space_.age_mark(); }
1532 // Set the age mark in the active semispace.
1533 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1534
1535  // The start address of the space and a bit mask. AND-ing an address in the
1536  // new space with the mask yields the start address.
1537 Address start() { return start_; }
1538 uintptr_t mask() { return address_mask_; }
1539
1540 // The allocation top and limit addresses.
1541 Address* allocation_top_address() { return &allocation_info_.top; }
1542 Address* allocation_limit_address() { return &allocation_info_.limit; }
1543
John Reck59135872010-11-02 12:39:01 -07001544 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001545 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1546 }
1547
1548 // Allocate the requested number of bytes for relocation during mark-compact
1549 // collection.
John Reck59135872010-11-02 12:39:01 -07001550 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001551 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1552 }
1553
1554 // Reset the allocation pointer to the beginning of the active semispace.
1555 void ResetAllocationInfo();
1556  // Reset the relocation pointer to the bottom of the inactive semispace in
1557 // preparation for mark-compact collection.
1558 void MCResetRelocationInfo();
1559 // Update the allocation pointer in the active semispace after a
1560 // mark-compact collection.
1561 void MCCommitRelocationInfo();
1562
1563 // Get the extent of the inactive semispace (for use as a marking stack).
1564 Address FromSpaceLow() { return from_space_.low(); }
1565 Address FromSpaceHigh() { return from_space_.high(); }
1566
1567 // Get the extent of the active semispace (to sweep newly copied objects
1568 // during a scavenge collection).
1569 Address ToSpaceLow() { return to_space_.low(); }
1570 Address ToSpaceHigh() { return to_space_.high(); }
1571
1572 // Offsets from the beginning of the semispaces.
1573 int ToSpaceOffsetForAddress(Address a) {
1574 return to_space_.SpaceOffsetForAddress(a);
1575 }
1576 int FromSpaceOffsetForAddress(Address a) {
1577 return from_space_.SpaceOffsetForAddress(a);
1578 }
1579
1580 // True if the object is a heap object in the address range of the
1581 // respective semispace (not necessarily below the allocation pointer of the
1582 // semispace).
1583 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1584 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1585
1586 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1587 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1588
Leon Clarkee46be812010-01-19 14:06:41 +00001589 virtual bool ReserveSpace(int bytes);
1590
Steve Blocka7e24c12009-10-30 11:49:00 +00001591#ifdef ENABLE_HEAP_PROTECTION
1592 // Protect/unprotect the space by marking it read-only/writable.
1593 virtual void Protect();
1594 virtual void Unprotect();
1595#endif
1596
1597#ifdef DEBUG
1598 // Verify the active semispace.
1599 virtual void Verify();
1600 // Print the active semispace.
1601 virtual void Print() { to_space_.Print(); }
1602#endif
1603
1604#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1605 // Iterates the active semispace to collect statistics.
1606 void CollectStatistics();
1607 // Reports previously collected statistics of the active semispace.
1608 void ReportStatistics();
1609 // Clears previously collected statistics.
1610 void ClearHistograms();
1611
1612 // Record the allocation or promotion of a heap object. Note that we don't
1613 // record every single allocation, but only those that happen in the
1614 // to space during a scavenge GC.
1615 void RecordAllocation(HeapObject* obj);
1616 void RecordPromotion(HeapObject* obj);
1617#endif
1618
1619  // Returns whether the operation succeeded.
1620 bool CommitFromSpaceIfNeeded() {
1621 if (from_space_.is_committed()) return true;
1622 return from_space_.Commit();
1623 }
1624
1625 bool UncommitFromSpace() {
1626 if (!from_space_.is_committed()) return true;
1627 return from_space_.Uncommit();
1628 }
1629
1630 private:
1631 // The semispaces.
1632 SemiSpace to_space_;
1633 SemiSpace from_space_;
1634
1635 // Start address and bit mask for containment testing.
1636 Address start_;
1637 uintptr_t address_mask_;
1638 uintptr_t object_mask_;
1639 uintptr_t object_expected_;
1640
1641 // Allocation pointer and limit for normal allocation and allocation during
1642 // mark-compact collection.
1643 AllocationInfo allocation_info_;
1644 AllocationInfo mc_forwarding_info_;
1645
1646#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1647 HistogramInfo* allocated_histogram_;
1648 HistogramInfo* promoted_histogram_;
1649#endif
1650
1651 // Implementation of AllocateRaw and MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001652 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1653 int size_in_bytes,
1654 AllocationInfo* alloc_info);
Steve Blocka7e24c12009-10-30 11:49:00 +00001655
1656 friend class SemiSpaceIterator;
1657
1658 public:
1659 TRACK_MEMORY("NewSpace")
1660};
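
// A sketch (not part of the original header) of one way a scavenger can use
// the age mark exposed by NewSpace: objects below the age mark were already
// present at the previous scavenge and are therefore promotion candidates.
// The function name and the capacity-pressure check are hypothetical
// illustrations, not necessarily the collector's actual policy.
static inline bool ExampleShouldPromote(uintptr_t object_address,
                                        uintptr_t age_mark,
                                        intptr_t new_space_size,
                                        intptr_t new_space_capacity) {
  bool survived_a_scavenge = object_address < age_mark;
  bool space_is_crowded = new_space_size >= (new_space_capacity / 2);
  return survived_a_scavenge || space_is_crowded;
}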
1661
1662
1663// -----------------------------------------------------------------------------
1664// Free lists for old object spaces
1665//
1666// Free-list nodes are free blocks in the heap. They look like heap objects
1667// (free-list node pointers have the heap object tag, and they have a map like
1668// a heap object). They have a size and a next pointer. The next pointer is
1669// the raw address of the next free list node (or NULL).
1670class FreeListNode: public HeapObject {
1671 public:
1672 // Obtain a free-list node from a raw address. This is not a cast because
1673 // it does not check nor require that the first word at the address is a map
1674 // pointer.
1675 static FreeListNode* FromAddress(Address address) {
1676 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1677 }
1678
Steve Block3ce2e202009-11-05 08:53:23 +00001679 static inline bool IsFreeListNode(HeapObject* object);
1680
Steve Blocka7e24c12009-10-30 11:49:00 +00001681 // Set the size in bytes, which can be read with HeapObject::Size(). This
1682 // function also writes a map to the first word of the block so that it
1683 // looks like a heap object to the garbage collector and heap iteration
1684 // functions.
1685 void set_size(int size_in_bytes);
1686
1687 // Accessors for the next field.
1688 inline Address next();
1689 inline void set_next(Address next);
1690
1691 private:
1692 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1693
1694 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1695};
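
// A sketch (not part of the original header) of the intrusive linking that
// FreeListNode enables: the 'next' pointer of a free block is stored inside
// the freed block itself, so the free list needs no side allocations.  The
// offset parameter and the names are hypothetical stand-ins for kNextOffset
// and the real accessors.
static inline void ExamplePushFreeBlock(uintptr_t* list_head,
                                        uintptr_t block_address,
                                        size_t next_offset) {
  // Store the current head inside the freed block, then make the block the
  // new head of the list.
  *reinterpret_cast<uintptr_t*>(block_address + next_offset) = *list_head;
  *list_head = block_address;
}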
1696
1697
1698// The free list for the old space.
1699class OldSpaceFreeList BASE_EMBEDDED {
1700 public:
1701 explicit OldSpaceFreeList(AllocationSpace owner);
1702
1703 // Clear the free list.
1704 void Reset();
1705
1706 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001707 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001708
1709 // Place a node on the free list. The block of size 'size_in_bytes'
1710 // starting at 'start' is placed on the free list. The return value is the
1711 // number of bytes that have been lost due to internal fragmentation by
1712 // freeing the block. Bookkeeping information will be written to the block,
1713 // ie, its contents will be destroyed. The start address should be word
1714 // aligned, and the size should be a non-zero multiple of the word size.
1715 int Free(Address start, int size_in_bytes);
1716
1717 // Allocate a block of size 'size_in_bytes' from the free list. The block
1718  // is uninitialized. A failure is returned if no block is available. The
1719 // number of bytes lost to fragmentation is returned in the output parameter
1720 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
John Reck59135872010-11-02 12:39:01 -07001721 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001722
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001723 void MarkNodes();
1724
Steve Blocka7e24c12009-10-30 11:49:00 +00001725 private:
1726 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1727 // will always result in waste.)
1728 static const int kMinBlockSize = 2 * kPointerSize;
1729 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1730
1731 // The identity of the owning space, for building allocation Failure
1732 // objects.
1733 AllocationSpace owner_;
1734
1735 // Total available bytes in all blocks on this free list.
1736 int available_;
1737
1738 // Blocks are put on exact free lists in an array, indexed by size in words.
1739 // The available sizes are kept in an increasingly ordered list. Entries
1740 // corresponding to sizes < kMinBlockSize always have an empty free list
1741 // (but index kHead is used for the head of the size list).
1742 struct SizeNode {
1743 // Address of the head FreeListNode of the implied block size or NULL.
1744 Address head_node_;
1745 // Size (words) of the next larger available size if head_node_ != NULL.
1746 int next_size_;
1747 };
1748 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1749 SizeNode free_[kFreeListsLength];
1750
1751 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1752 static const int kHead = kMinBlockSize / kPointerSize - 1;
1753 static const int kEnd = kMaxInt;
1754
1755 // We keep a "finger" in the size list to speed up a common pattern:
1756 // repeated requests for the same or increasing sizes.
1757 int finger_;
1758
1759 // Starting from *prev, find and return the smallest size >= index (words),
1760 // or kEnd. Update *prev to be the largest size < index, or kHead.
1761 int FindSize(int index, int* prev) {
1762 int cur = free_[*prev].next_size_;
1763 while (cur < index) {
1764 *prev = cur;
1765 cur = free_[cur].next_size_;
1766 }
1767 return cur;
1768 }
1769
1770 // Remove an existing element from the size list.
1771 void RemoveSize(int index) {
1772 int prev = kHead;
1773 int cur = FindSize(index, &prev);
1774 ASSERT(cur == index);
1775 free_[prev].next_size_ = free_[cur].next_size_;
1776 finger_ = prev;
1777 }
1778
1779 // Insert a new element into the size list.
1780 void InsertSize(int index) {
1781 int prev = kHead;
1782 int cur = FindSize(index, &prev);
1783 ASSERT(cur != index);
1784 free_[prev].next_size_ = index;
1785 free_[index].next_size_ = cur;
1786 }
1787
1788 // The size list is not updated during a sequence of calls to Free, but is
1789 // rebuilt before the next allocation.
1790 void RebuildSizeList();
1791 bool needs_rebuild_;
1792
1793#ifdef DEBUG
1794 // Does this free list contain a free block located at the address of 'node'?
1795 bool Contains(FreeListNode* node);
1796#endif
1797
1798 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1799};
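
// A sketch (not part of the original header) of the waste accounting implied
// by OldSpaceFreeList::Free: blocks smaller than the minimum block size
// cannot be linked into the exact-size lists and are counted as wasted bytes
// instead.  The parameter names are hypothetical stand-ins for kMinBlockSize
// and the real bookkeeping.
static inline int ExampleFreeBlock(int size_in_bytes,
                                   int min_block_size_in_bytes,
                                   int* available_bytes) {
  if (size_in_bytes < min_block_size_in_bytes) {
    return size_in_bytes;  // Too small to track; the whole block is waste.
  }
  *available_bytes += size_in_bytes;  // Usable; add it to the free total.
  return 0;                           // Nothing wasted.
}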
1800
1801
1802// The free list for the map space.
1803class FixedSizeFreeList BASE_EMBEDDED {
1804 public:
1805 FixedSizeFreeList(AllocationSpace owner, int object_size);
1806
1807 // Clear the free list.
1808 void Reset();
1809
1810 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001811 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001812
1813 // Place a node on the free list. The block starting at 'start' (assumed to
1814 // have size object_size_) is placed on the free list. Bookkeeping
1815  // information will be written to the block, i.e., its contents will be
1816 // destroyed. The start address should be word aligned.
1817 void Free(Address start);
1818
1819  // Allocate a fixed-size block from the free list. The block is uninitialized.
1820 // A failure is returned if no block is available.
John Reck59135872010-11-02 12:39:01 -07001821 MUST_USE_RESULT MaybeObject* Allocate();
Steve Blocka7e24c12009-10-30 11:49:00 +00001822
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001823 void MarkNodes();
1824
Steve Blocka7e24c12009-10-30 11:49:00 +00001825 private:
1826 // Available bytes on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001827 intptr_t available_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001828
1829 // The head of the free list.
1830 Address head_;
1831
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001832 // The tail of the free list.
1833 Address tail_;
1834
Steve Blocka7e24c12009-10-30 11:49:00 +00001835 // The identity of the owning space, for building allocation Failure
1836 // objects.
1837 AllocationSpace owner_;
1838
1839 // The size of the objects in this space.
1840 int object_size_;
1841
1842 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1843};
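
// A sketch (not part of the original header) of a FIFO free list of
// fixed-size blocks in the spirit of FixedSizeFreeList: freed blocks are
// appended at the tail and allocation pops from the head, so blocks are
// reused in the order in which they were freed.  The node layout and names
// are hypothetical.
struct ExampleFixedNode {
  ExampleFixedNode* next;
};

static inline void ExampleFixedFree(ExampleFixedNode** head,
                                    ExampleFixedNode** tail,
                                    ExampleFixedNode* block) {
  block->next = NULL;
  if (*tail == NULL) {
    *head = block;  // The list was empty; the block becomes head and tail.
  } else {
    (*tail)->next = block;
  }
  *tail = block;
}

static inline ExampleFixedNode* ExampleFixedAllocate(ExampleFixedNode** head,
                                                     ExampleFixedNode** tail) {
  if (*head == NULL) return NULL;  // Nothing available.
  ExampleFixedNode* block = *head;
  *head = block->next;
  if (*head == NULL) *tail = NULL;  // The list became empty.
  return block;
}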
1844
1845
1846// -----------------------------------------------------------------------------
1847// Old object space (excluding map objects)
1848
1849class OldSpace : public PagedSpace {
1850 public:
1851 // Creates an old space object with a given maximum capacity.
1852 // The constructor does not allocate pages from OS.
Ben Murdochf87a2032010-10-22 12:50:53 +01001853 explicit OldSpace(intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 AllocationSpace id,
1855 Executability executable)
1856 : PagedSpace(max_capacity, id, executable), free_list_(id) {
1857 page_extra_ = 0;
1858 }
1859
1860  // The bytes available on the free list (i.e., not above the linear allocation
1861 // pointer).
Ben Murdochf87a2032010-10-22 12:50:53 +01001862 intptr_t AvailableFree() { return free_list_.available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001863
Steve Block6ded16b2010-05-10 14:33:55 +01001864 // The limit of allocation for a page in this space.
1865 virtual Address PageAllocationLimit(Page* page) {
1866 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001867 }
1868
1869 // Give a block of memory to the space's free list. It might be added to
1870 // the free list or accounted as waste.
Steve Block6ded16b2010-05-10 14:33:55 +01001871  // If add_to_freelist is false, only the accounting stats are updated and
1872  // no attempt is made to add the area to the free list.
1873 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001874 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001875
1876 if (add_to_freelist) {
1877 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1878 accounting_stats_.WasteBytes(wasted_bytes);
1879 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 }
1881
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001882 virtual void DeallocateBlock(Address start,
1883 int size_in_bytes,
1884 bool add_to_freelist);
1885
Steve Blocka7e24c12009-10-30 11:49:00 +00001886 // Prepare for full garbage collection. Resets the relocation pointer and
1887 // clears the free list.
1888 virtual void PrepareForMarkCompact(bool will_compact);
1889
1890 // Updates the allocation pointer to the relocation top after a mark-compact
1891 // collection.
1892 virtual void MCCommitRelocationInfo();
1893
Leon Clarkee46be812010-01-19 14:06:41 +00001894 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1895
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001896 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1897
Steve Blocka7e24c12009-10-30 11:49:00 +00001898#ifdef DEBUG
1899 // Reports statistics for the space
1900 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001901#endif
1902
1903 protected:
1904 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001905 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001906
1907 // Virtual function in the superclass. Allocate linearly at the start of
1908 // the page after current_page (there is assumed to be one).
1909 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1910
1911 private:
1912 // The space's free list.
1913 OldSpaceFreeList free_list_;
1914
1915 public:
1916 TRACK_MEMORY("OldSpace")
1917};
1918
1919
1920// -----------------------------------------------------------------------------
1921// Old space for objects of a fixed size
1922
1923class FixedSpace : public PagedSpace {
1924 public:
Ben Murdochf87a2032010-10-22 12:50:53 +01001925 FixedSpace(intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001926 AllocationSpace id,
1927 int object_size_in_bytes,
1928 const char* name)
1929 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1930 object_size_in_bytes_(object_size_in_bytes),
1931 name_(name),
1932 free_list_(id, object_size_in_bytes) {
1933 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1934 }
1935
Steve Block6ded16b2010-05-10 14:33:55 +01001936 // The limit of allocation for a page in this space.
1937 virtual Address PageAllocationLimit(Page* page) {
1938 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001939 }
1940
1941 int object_size_in_bytes() { return object_size_in_bytes_; }
1942
1943 // Give a fixed sized block of memory to the space's free list.
Steve Block6ded16b2010-05-10 14:33:55 +01001944  // If add_to_freelist is false, only the accounting stats are updated and
1945  // no attempt is made to add the area to the free list.
1946 void Free(Address start, bool add_to_freelist) {
1947 if (add_to_freelist) {
1948 free_list_.Free(start);
1949 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001950 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1951 }
1952
1953 // Prepares for a mark-compact GC.
1954 virtual void PrepareForMarkCompact(bool will_compact);
1955
1956 // Updates the allocation pointer to the relocation top after a mark-compact
1957 // collection.
1958 virtual void MCCommitRelocationInfo();
1959
Leon Clarkee46be812010-01-19 14:06:41 +00001960 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1961
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001962 virtual void DeallocateBlock(Address start,
1963 int size_in_bytes,
1964 bool add_to_freelist);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001965
1966 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1967
Steve Blocka7e24c12009-10-30 11:49:00 +00001968#ifdef DEBUG
1969  // Reports statistics for the space.
1970 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001971#endif
1972
1973 protected:
1974 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001975 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001976
1977 // Virtual function in the superclass. Allocate linearly at the start of
1978 // the page after current_page (there is assumed to be one).
1979 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1980
Leon Clarkee46be812010-01-19 14:06:41 +00001981 void ResetFreeList() {
1982 free_list_.Reset();
1983 }
1984
Steve Blocka7e24c12009-10-30 11:49:00 +00001985 private:
1986 // The size of objects in this space.
1987 int object_size_in_bytes_;
1988
1989 // The name of this space.
1990 const char* name_;
1991
1992 // The space's free list.
1993 FixedSizeFreeList free_list_;
1994};
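
// A sketch (not part of the original header) of the page_extra_ arithmetic
// used by FixedSpace: a page's object area holds only a whole number of
// fixed-size objects, so the remainder of the division is unusable and the
// per-page allocation limit is pulled back by that many bytes.  The names
// are hypothetical.
static inline int ExampleUnusablePageBytes(int object_area_size,
                                           int object_size_in_bytes) {
  return object_area_size % object_size_in_bytes;
}

static inline uintptr_t ExamplePageAllocationLimit(uintptr_t object_area_end,
                                                   int object_area_size,
                                                   int object_size_in_bytes) {
  return object_area_end -
         ExampleUnusablePageBytes(object_area_size, object_size_in_bytes);
}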
1995
1996
1997// -----------------------------------------------------------------------------
1998// Old space for all map objects
1999
2000class MapSpace : public FixedSpace {
2001 public:
2002 // Creates a map space object with a maximum capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01002003 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
Leon Clarked91b9f72010-01-27 17:25:45 +00002004 : FixedSpace(max_capacity, id, Map::kSize, "map"),
2005 max_map_space_pages_(max_map_space_pages) {
2006 ASSERT(max_map_space_pages < kMaxMapPageIndex);
2007 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002008
2009 // Prepares for a mark-compact GC.
2010 virtual void PrepareForMarkCompact(bool will_compact);
2011
2012 // Given an index, returns the page address.
2013 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2014
Leon Clarked91b9f72010-01-27 17:25:45 +00002015 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00002016
Leon Clarkee46be812010-01-19 14:06:41 +00002017 // Are map pointers encodable into map word?
2018 bool MapPointersEncodable() {
2019 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002020 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00002021 return true;
2022 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002023 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002024 }
2025
2026 // Should be called after forced sweep to find out if map space needs
2027 // compaction.
2028 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002029 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00002030 }
2031
2032 Address TopAfterCompaction(int live_maps) {
2033 ASSERT(NeedsCompaction(live_maps));
2034
2035 int pages_left = live_maps / kMapsPerPage;
2036 PageIterator it(this, PageIterator::ALL_PAGES);
2037 while (pages_left-- > 0) {
2038 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002039 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002040 }
2041 ASSERT(it.has_next());
2042 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002043 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002044 ASSERT(top_page->is_valid());
2045
2046 int offset = live_maps % kMapsPerPage * Map::kSize;
2047 Address top = top_page->ObjectAreaStart() + offset;
2048 ASSERT(top < top_page->ObjectAreaEnd());
2049 ASSERT(Contains(top));
2050
2051 return top;
2052 }
2053
2054 void FinishCompaction(Address new_top, int live_maps) {
2055 Page* top_page = Page::FromAddress(new_top);
2056 ASSERT(top_page->is_valid());
2057
2058 SetAllocationInfo(&allocation_info_, top_page);
2059 allocation_info_.top = new_top;
2060
2061 int new_size = live_maps * Map::kSize;
2062 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2063 accounting_stats_.AllocateBytes(new_size);
2064
2065#ifdef DEBUG
2066 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002067 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00002068 for (Page* p = first_page_; p != top_page; p = p->next_page())
2069 actual_size += kMapsPerPage * Map::kSize;
2070 actual_size += (new_top - top_page->ObjectAreaStart());
2071 ASSERT(accounting_stats_.Size() == actual_size);
2072 }
2073#endif
2074
2075 Shrink();
2076 ResetFreeList();
2077 }
2078
Steve Blocka7e24c12009-10-30 11:49:00 +00002079 protected:
2080#ifdef DEBUG
2081 virtual void VerifyObject(HeapObject* obj);
2082#endif
2083
2084 private:
Leon Clarkee46be812010-01-19 14:06:41 +00002085 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2086
2087 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002088 int CompactionThreshold() {
2089 return kMapsPerPage * (max_map_space_pages_ - 1);
2090 }
2091
2092 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002093
Steve Blocka7e24c12009-10-30 11:49:00 +00002094 // An array of page start address in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002095 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002096
2097 public:
2098 TRACK_MEMORY("MapSpace")
2099};
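
// A sketch (not part of the original header) of the arithmetic behind
// MapSpace::TopAfterCompaction: after compaction the surviving maps fill
// whole pages first, and the allocation top lands
// 'live_maps % maps_per_page' objects into the following page.  The names
// are hypothetical.
static inline void ExampleCompactedMapTop(int live_maps,
                                          int maps_per_page,
                                          int map_size_in_bytes,
                                          int* full_pages,
                                          int* offset_in_last_page) {
  *full_pages = live_maps / maps_per_page;
  *offset_in_last_page = (live_maps % maps_per_page) * map_size_in_bytes;
}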
2100
2101
2102// -----------------------------------------------------------------------------
2103// Old space for all global object property cell objects
2104
2105class CellSpace : public FixedSpace {
2106 public:
2107 // Creates a property cell space object with a maximum capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01002108 CellSpace(intptr_t max_capacity, AllocationSpace id)
Steve Blocka7e24c12009-10-30 11:49:00 +00002109 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2110
2111 protected:
2112#ifdef DEBUG
2113 virtual void VerifyObject(HeapObject* obj);
2114#endif
2115
2116 public:
2117 TRACK_MEMORY("CellSpace")
2118};
2119
2120
2121// -----------------------------------------------------------------------------
2122// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2123// the large object space. A large object is allocated from OS heap with
2124// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2125  // A large object always starts at Page::kObjectStartOffset within its page.
2126// Large objects do not move during garbage collections.
2127
2128// A LargeObjectChunk holds exactly one large object page with exactly one
2129// large object.
2130class LargeObjectChunk {
2131 public:
2132 // Allocates a new LargeObjectChunk that contains a large object page
2133 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002134 // object) bytes after the object area start of that page.
2135 // The allocated chunk size is set in the output parameter chunk_size.
Steve Blocka7e24c12009-10-30 11:49:00 +00002136 static LargeObjectChunk* New(int size_in_bytes,
2137 size_t* chunk_size,
2138 Executability executable);
2139
2140 // Interpret a raw address as a large object chunk.
2141 static LargeObjectChunk* FromAddress(Address address) {
2142 return reinterpret_cast<LargeObjectChunk*>(address);
2143 }
2144
2145 // Returns the address of this chunk.
2146 Address address() { return reinterpret_cast<Address>(this); }
2147
2148 // Accessors for the fields of the chunk.
2149 LargeObjectChunk* next() { return next_; }
2150 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2151
Steve Block791712a2010-08-27 10:21:07 +01002152 size_t size() { return size_ & ~Page::kPageFlagMask; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002153 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
2154
2155 // Returns the object in this chunk.
2156 inline HeapObject* GetObject();
2157
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002158  // Given a requested size, returns the physical size of the chunk to be
2159 // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002160 static int ChunkSizeFor(int size_in_bytes);
2161
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002162 // Given a chunk size, returns the object size it can accommodate. Used by
2163 // LargeObjectSpace::Available.
Ben Murdochf87a2032010-10-22 12:50:53 +01002164 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002165 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2166 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2167 }
2168
2169 private:
2170 // A pointer to the next large object chunk in the space or NULL.
2171 LargeObjectChunk* next_;
2172
2173 // The size of this chunk.
2174 size_t size_;
2175
2176 public:
2177 TRACK_MEMORY("LargeObjectChunk")
2178};
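
// A sketch (not part of the original header) of the size relation expressed
// by LargeObjectChunk::ObjectSizeFor: a chunk carries one page of header and
// alignment overhead plus the object-start offset before any bytes become
// usable for the large object itself.  The names are hypothetical.
static inline intptr_t ExampleLargeObjectPayload(intptr_t chunk_size,
                                                 intptr_t page_size,
                                                 intptr_t object_start_offset) {
  intptr_t overhead = page_size + object_start_offset;
  if (chunk_size <= overhead) return 0;  // Too small to hold any object.
  return chunk_size - overhead;
}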
2179
2180
2181class LargeObjectSpace : public Space {
2182 public:
2183 explicit LargeObjectSpace(AllocationSpace id);
2184 virtual ~LargeObjectSpace() {}
2185
2186 // Initializes internal data structures.
2187 bool Setup();
2188
2189 // Releases internal resources, frees objects in this space.
2190 void TearDown();
2191
2192 // Allocates a (non-FixedArray, non-Code) large object.
John Reck59135872010-11-02 12:39:01 -07002193 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002194 // Allocates a large Code object.
John Reck59135872010-11-02 12:39:01 -07002195 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002196 // Allocates a large FixedArray.
John Reck59135872010-11-02 12:39:01 -07002197 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002198
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002199 // Available bytes for objects in this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01002200 intptr_t Available() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002201 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2202 }
2203
Ben Murdochf87a2032010-10-22 12:50:53 +01002204 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002205 return size_;
2206 }
2207
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002208 virtual intptr_t SizeOfObjects() {
2209 return objects_size_;
2210 }
2211
Steve Blocka7e24c12009-10-30 11:49:00 +00002212 int PageCount() {
2213 return page_count_;
2214 }
2215
2216  // Finds an object at a given address; returns Failure::Exception() if it
2217  // is not found. The function iterates through all objects in this space
2218  // and may be slow.
John Reck59135872010-11-02 12:39:01 -07002219 MaybeObject* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002220
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002221 // Finds a large object page containing the given pc, returns NULL
2222 // if such a page doesn't exist.
2223 LargeObjectChunk* FindChunkContainingPc(Address pc);
2224
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002225 // Iterates objects covered by dirty regions.
2226 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002227
2228 // Frees unmarked objects.
2229 void FreeUnmarkedObjects();
2230
2231 // Checks whether a heap object is in this space; O(1).
2232 bool Contains(HeapObject* obj);
2233
2234 // Checks whether the space is empty.
2235 bool IsEmpty() { return first_chunk_ == NULL; }
2236
Leon Clarkee46be812010-01-19 14:06:41 +00002237 // See the comments for ReserveSpace in the Space class. This has to be
2238 // called after ReserveSpace has been called on the paged spaces, since they
2239 // may use some memory, leaving less for large objects.
2240 virtual bool ReserveSpace(int bytes);
2241
Steve Blocka7e24c12009-10-30 11:49:00 +00002242#ifdef ENABLE_HEAP_PROTECTION
2243 // Protect/unprotect the space by marking it read-only/writable.
2244 void Protect();
2245 void Unprotect();
2246#endif
2247
2248#ifdef DEBUG
2249 virtual void Verify();
2250 virtual void Print();
2251 void ReportStatistics();
2252 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002253#endif
2254  // Checks whether an address is in the object area of this space. It
2255  // iterates over all objects in the space and may be slow.
2256 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2257
2258 private:
2259 // The head of the linked list of large object chunks.
2260 LargeObjectChunk* first_chunk_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002261 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002262 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002263 intptr_t objects_size_; // size of objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002264
2265 // Shared implementation of AllocateRaw, AllocateRawCode and
2266 // AllocateRawFixedArray.
John Reck59135872010-11-02 12:39:01 -07002267 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2268 int object_size,
2269 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002270
Steve Blocka7e24c12009-10-30 11:49:00 +00002271 friend class LargeObjectIterator;
2272
2273 public:
2274 TRACK_MEMORY("LargeObjectSpace")
2275};
2276
2277
2278class LargeObjectIterator: public ObjectIterator {
2279 public:
2280 explicit LargeObjectIterator(LargeObjectSpace* space);
2281 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2282
Steve Blocka7e24c12009-10-30 11:49:00 +00002283 HeapObject* next();
2284
2285 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002286 virtual HeapObject* next_object() { return next(); }
2287
2288 private:
2289 LargeObjectChunk* current_;
2290 HeapObjectCallback size_func_;
2291};
2292
2293
2294} } // namespace v8::internal
2295
2296#endif // V8_SPACES_H_