// Copyright 2006-2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bit encoding see the comments in the
// Page::GetRegionNumberForAddress() method body; an illustrative sketch also
// follows this overview.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if a page belongs to the old
// pointer space or the large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer-
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in the old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to the map space, which is
// iterated in a special fashion. However, we still require pointer fields of
// dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on a page should reside under the
// allocation watermark. During a scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark into a special field in the page header and set
// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//

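// Illustrative sketch of the page/region arithmetic implied by the constants
// defined further down in this file (Page::kPageSize is 8K and
// Page::kRegionSize is 256, so a page has kPageSize / kRegionSize == 32
// regions, one bit each in the 32-bit dirty_regions_ word). This is only a
// sketch of the arithmetic, not the actual implementation; see
// Page::GetRegionNumberForAddress() for the real encoding:
//
//   Page* p = Page::FromAddress(slot_addr);
//   int region = p->Offset(slot_addr) >> Page::kRegionSizeLog2;  // 0..31
//   uint32_t mask = 1 << region;  // bit for this region in dirty_regions_
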
// Some assertion macros used in debug mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Returns the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address from the start of this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }
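
  // Illustrative sketch (not part of the interface): Offset() and
  // OffsetToAddress() are inverses for addresses in the object area, e.g.
  // for a heap object at address addr on page p:
  //
  //   int offset = p->Offset(addr);
  //   ASSERT(p->OffsetToAddress(offset) == addr);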

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);
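
  // Usage sketch (illustrative only; assumes slot_addr points at a slot in an
  // old space object that has just received a pointer into new space):
  //
  //   Page* page = Page::FromAddress(slot_addr);
  //   page->MarkRegionDirty(slot_addr);
  //   // Later, during a scavenge, only regions with a set dirty bit need to
  //   // be scanned for intergenerational pointers:
  //   if (page->IsRegionDirty(slot_addr)) { /* visit slots in this region */ }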

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize;

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // The page allocation watermark was bumped by preallocation during
    // scavenge. The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge, and each page becomes marked
  // as having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  // if a page is in use then it is marked as having an invalid watermark at
  // the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag();

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
      kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);
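
  // An illustrative sketch (not normative) of how the constants above carve
  // up the 32-bit flags_ word of a normal page: the low
  // kAllocationWatermarkOffsetShift bits (selected by kFlagsMask) hold page
  // flags, and the next kAllocationWatermarkOffsetBits bits (selected by
  // kAllocationWatermarkOffsetMask) hold the allocation watermark offset:
  //
  //   uint32_t watermark_offset =
  //       (flags_ & kAllocationWatermarkOffsetMask) >>
  //       kAllocationWatermarkOffsetShift;
  //   uint32_t flag_bits = flags_ & kFlagsMask;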

  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
  // Instead of clearing this flag from all pages we just flip
  // its meaning at the beginning of a scavenge.
  static intptr_t watermark_invalidated_mark_;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize, 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for the regions covering the page. Only
  // dirty regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding
  // address of the first live object in this page.
  // During scavenge collections this field is used to store the allocation
  // watermark if it is altered during the scavenge.
  Address mc_first_forwarded;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4 GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expansion of a space allocates kPagesPerChunk pages, except possibly the
// last expansion (before running out of space). The first chunk may also
// contain fewer than kPagesPerChunk pages.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//
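// For example (illustrative), if chunk A holds pages A1, A2, A3 and chunk B
// holds pages B1, B2, then the page list
//   A1 -> A2 -> A3 -> B1 -> B2   is in chunk order, and so is
//   B1 -> B2 -> A1 -> A2 -> A3,  whereas
//   A1 -> B1 -> A2 -> B2 -> A3   is not, because pages of the same chunk are
// interleaved with pages of another chunk.
//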


class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  static bool Setup(intptr_t max_capacity, intptr_t capacity_executable);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful, leaving the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commits a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommits a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[, thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk order (see the comment for this class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by the given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT static void* AllocateRawMemory(const size_t requested,
                                                 size_t* allocated,
                                                 Executability executable);
  static void FreeRawMemory(void* buf,
                            size_t length,
                            Executability executable);
  static void PerformAllocationCallback(ObjectSpace space,
                                        AllocationAction action,
                                        size_t size);

  static void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                          ObjectSpace space,
                                          AllocationAction action);
  static void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);
  static bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  static intptr_t Available() {
    return capacity_ < size_ ? 0 : capacity_ - size_;
  }

  // Returns allocated spaces in bytes.
  static intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  static intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  static intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  static intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Sanity check on a pointer.
  static bool SafeIsInAPageChunk(Address addr);

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by the space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  static void AddToAllocatedChunks(Address addr, intptr_t size);
  static void RemoveFromAllocatedChunks(Address addr, intptr_t size);
  // Note: This only checks the regular chunks, not the odd-sized initial
  // chunk.
  static bool InAllocatedChunks(Address addr);

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
  static const int kPagesPerChunkLog2 = 5;
  static const int kChunkTableLevels = 4;
  static const int kChunkTableBitsPerLevel = 12;
#else
  static const int kPagesPerChunk = 16;
  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
  static const int kPagesPerChunkLog2 = 4;
  static const int kChunkTableLevels = 2;
  static const int kChunkTableBitsPerLevel = 8;
#endif

 private:
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;
  static const int kChunkTableTopLevelEntries =
      1 << (sizeof(intptr_t) * kBitsPerByte - kChunkSizeLog2 -
          (kChunkTableLevels - 1) * kChunkTableBitsPerLevel);

  // The chunks are not chunk-size aligned, so for a given chunk-sized area of
  // memory there can be two chunks that cover it.
  static const int kChunkTableFineGrainedWordsPerEntry = 2;
  static const uintptr_t kUnusedChunkTableEntry = 0;

  // Maximum space size in bytes.
  static intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  static intptr_t capacity_executable_;

  // Top level table to track whether memory is part of a chunk or not.
  static uintptr_t chunk_table_[kChunkTableTopLevelEntries];

  // Allocated space size in bytes.
  static intptr_t size_;
  // Allocated executable space size in bytes.
  static intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  static List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save the executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Helpers to maintain and query the chunk tables.
  static void AddChunkUsingAddress(
      uintptr_t chunk_start,        // Where the chunk starts.
      uintptr_t chunk_index_base);  // Used to place the chunk in the tables.
  static void RemoveChunkFoundUsingAddress(
      uintptr_t chunk_start,        // Where the chunk starts.
      uintptr_t chunk_index_base);  // Used to locate the entry in the tables.
  // Controls whether the lookup creates intermediate levels of tables as
  // needed.
  enum CreateTables { kDontCreateTables, kCreateTablesAsNeeded };
  static uintptr_t* AllocatedChunksFinder(uintptr_t* table,
                                          uintptr_t address,
                                          int bit_position,
                                          CreateTables create_as_needed);
  static void FreeChunkTables(uintptr_t* array, int length, int level);
  static int FineGrainedIndexForAddress(uintptr_t address) {
    int index = ((address >> kChunkSizeLog2) &
                 ((1 << kChunkTableBitsPerLevel) - 1));
    return index * kChunkTableFineGrainedWordsPerEntry;
  }


  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// object iterators.
//
// NOTE: The space specific object iterators also implement their own next()
// method, which is used to avoid the cost of virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next, goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
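
// A typical usage sketch (illustrative; it assumes, as callers elsewhere in
// the heap code do, that next() returns NULL once the iterator is exhausted):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... process obj ...
//   }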


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//     mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};
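
// A usage sketch (illustrative only):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... visit objects on p, e.g. with a HeapObjectIterator ...
//   }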


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page(). The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Available() { return available_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(intptr_t size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t available_;
  intptr_t size_;
  intptr_t waste_;
};
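
// An illustrative example of the balancing described above (not taken from
// the real callers): after
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity and available grow
//   stats.AllocateBytes(128);                  // available -> size
//   stats.WasteBytes(32);                      // available -> waste
//
// the invariant Capacity() == Size() + Waste() + Available() still holds.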


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, and uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }
  // Never crashes even if a is not a valid pointer.
  inline bool SafeContains(Address a);

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if page is unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
                                                : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  intptr_t Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation, not recoverable until the
  // next GC of this space.
  intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocates the requested number of bytes in the space if possible, returns
  // a failure object if not.
  MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
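
  // A rough sketch of the typical call pattern (illustrative only; the
  // MaybeObject handling mirrors how allocation results are usually consumed
  // in the heap code, but the surrounding code here is hypothetical):
  //
  //   Object* result;
  //   { MaybeObject* maybe_result = space->AllocateRaw(size_in_bytes);
  //     if (!maybe_result->ToObject(&result)) return maybe_result;
  //   }
  //   // result now refers to uninitialized memory of size_in_bytes bytes.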
Steve Blocka7e24c12009-10-30 11:49:00 +00001131
1132 // Allocate the requested number of bytes for relocation during mark-compact
1133 // collection.
John Reck59135872010-11-02 12:39:01 -07001134 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001135
Leon Clarkee46be812010-01-19 14:06:41 +00001136 virtual bool ReserveSpace(int bytes);
1137
1138 // Used by ReserveSpace.
1139 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001140
Steve Block6ded16b2010-05-10 14:33:55 +01001141 // Free all pages in range from prev (exclusive) to last (inclusive).
1142 // Freed pages are moved to the end of page list.
1143 void FreePages(Page* prev, Page* last);
1144
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001145 // Deallocates a block.
1146 virtual void DeallocateBlock(Address start,
1147 int size_in_bytes,
1148 bool add_to_freelist) = 0;
1149
Steve Block6ded16b2010-05-10 14:33:55 +01001150 // Set space allocation info.
1151 void SetTop(Address top) {
1152 allocation_info_.top = top;
1153 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1154 }
1155
Steve Blocka7e24c12009-10-30 11:49:00 +00001156 // ---------------------------------------------------------------------------
1157 // Mark-compact collection support functions
1158
1159 // Set the relocation point to the beginning of the space.
1160 void MCResetRelocationInfo();
1161
1162 // Writes relocation info to the top page.
1163 void MCWriteRelocationInfoToPage() {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001164 TopPageOf(mc_forwarding_info_)->
1165 SetAllocationWatermark(mc_forwarding_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001166 }
1167
  // Computes the offset of a given address in this space relative to the
  // beginning of the space.
1170 int MCSpaceOffsetForAddress(Address addr);
1171
1172 // Updates the allocation pointer to the relocation top after a mark-compact
1173 // collection.
1174 virtual void MCCommitRelocationInfo() = 0;
1175
1176 // Releases half of unused pages.
1177 void Shrink();
1178
1179 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1180 bool EnsureCapacity(int capacity);
1181
1182#ifdef ENABLE_HEAP_PROTECTION
1183 // Protect/unprotect the space by marking it read-only/writable.
1184 void Protect();
1185 void Unprotect();
1186#endif
1187
1188#ifdef DEBUG
1189 // Print meta info and objects in this space.
1190 virtual void Print();
1191
1192 // Verify integrity of this space.
1193 virtual void Verify(ObjectVisitor* visitor);
1194
1195 // Overridden by subclasses to verify space-specific object
1196 // properties (e.g., only maps or free-list nodes are in map space).
1197 virtual void VerifyObject(HeapObject* obj) {}
1198
1199 // Report code object related statistics
1200 void CollectCodeStatistics();
1201 static void ReportCodeStatistics();
1202 static void ResetCodeStatistics();
1203#endif
1204
Steve Block6ded16b2010-05-10 14:33:55 +01001205 // Returns the page of the allocation pointer.
1206 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1207
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001208 void RelinkPageListInChunkOrder(bool deallocate_blocks);
1209
Steve Blocka7e24c12009-10-30 11:49:00 +00001210 protected:
1211 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001212 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001213
1214 // Accounting information for this space.
1215 AllocationStats accounting_stats_;
1216
1217 // The first page in this space.
1218 Page* first_page_;
1219
1220 // The last page in this space. Initially set in Setup, updated in
1221 // Expand and Shrink.
1222 Page* last_page_;
1223
Steve Block6ded16b2010-05-10 14:33:55 +01001224 // True if pages owned by this space are linked in chunk-order.
1225 // See comment for class MemoryAllocator for definition of chunk-order.
1226 bool page_list_is_chunk_ordered_;
1227
Steve Blocka7e24c12009-10-30 11:49:00 +00001228 // Normal allocation information.
1229 AllocationInfo allocation_info_;
1230
1231 // Relocation information during mark-compact collections.
1232 AllocationInfo mc_forwarding_info_;
1233
1234 // Bytes of each page that cannot be allocated. Possibly non-zero
1235 // for pages in spaces with only fixed-size objects. Always zero
1236 // for pages in spaces with variable sized objects (those pages are
1237 // padded with free-list nodes).
1238 int page_extra_;
1239
1240 // Sets allocation pointer to a page bottom.
1241 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1242
1243 // Returns the top page specified by an allocation info structure.
1244 static Page* TopPageOf(AllocationInfo alloc_info) {
1245 return Page::FromAllocationTop(alloc_info.limit);
1246 }
1247
Leon Clarked91b9f72010-01-27 17:25:45 +00001248 int CountPagesToTop() {
1249 Page* p = Page::FromAllocationTop(allocation_info_.top);
1250 PageIterator it(this, PageIterator::ALL_PAGES);
1251 int counter = 1;
1252 while (it.has_next()) {
1253 if (it.next() == p) return counter;
1254 counter++;
1255 }
1256 UNREACHABLE();
1257 return -1;
1258 }
1259
  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
1263 bool Expand(Page* last_page);
1264
1265 // Generic fast case allocation function that tries linear allocation in
1266 // the top page of 'alloc_info'. Returns NULL on failure.
1267 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1268 int size_in_bytes);
1269
1270 // During normal allocation or deserialization, roll to the next page in
1271 // the space (there is assumed to be one) and allocate there. This
1272 // function is space-dependent.
1273 virtual HeapObject* AllocateInNextPage(Page* current_page,
1274 int size_in_bytes) = 0;
1275
1276 // Slow path of AllocateRaw. This function is space-dependent.
John Reck59135872010-11-02 12:39:01 -07001277 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001278
1279 // Slow path of MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001280 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001281
1282#ifdef DEBUG
Leon Clarkee46be812010-01-19 14:06:41 +00001283 // Returns the number of total pages in this space.
1284 int CountTotalPages();
Steve Blocka7e24c12009-10-30 11:49:00 +00001285#endif
1286 private:
Steve Blocka7e24c12009-10-30 11:49:00 +00001287
1288 // Returns a pointer to the page of the relocation pointer.
1289 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1290
Steve Blocka7e24c12009-10-30 11:49:00 +00001291 friend class PageIterator;
1292};
1293
1294
1295#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1296class NumberAndSizeInfo BASE_EMBEDDED {
1297 public:
1298 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1299
1300 int number() const { return number_; }
1301 void increment_number(int num) { number_ += num; }
1302
1303 int bytes() const { return bytes_; }
1304 void increment_bytes(int size) { bytes_ += size; }
1305
1306 void clear() {
1307 number_ = 0;
1308 bytes_ = 0;
1309 }
1310
1311 private:
1312 int number_;
1313 int bytes_;
1314};
1315
1316
1317// HistogramInfo class for recording a single "bar" of a histogram. This
1318// class is used for collecting statistics to print to stdout (when compiled
1319// with DEBUG) or to the log file (when compiled with
1320// ENABLE_LOGGING_AND_PROFILING).
1321class HistogramInfo: public NumberAndSizeInfo {
1322 public:
1323 HistogramInfo() : NumberAndSizeInfo() {}
1324
1325 const char* name() { return name_; }
1326 void set_name(const char* name) { name_ = name; }
1327
1328 private:
1329 const char* name_;
1330};
1331#endif
1332
1333
1334// -----------------------------------------------------------------------------
1335// SemiSpace in young generation
1336//
1337// A semispace is a contiguous chunk of memory. The mark-compact collector
1338// uses the memory in the from space as a marking stack when tracing live
1339// objects.
1340
1341class SemiSpace : public Space {
1342 public:
1343 // Constructor.
  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
1345 start_ = NULL;
1346 age_mark_ = NULL;
1347 }
1348
1349 // Sets up the semispace using the given chunk.
1350 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1351
1352 // Tear down the space. Heap memory was not allocated by the space, so it
1353 // is not deallocated here.
1354 void TearDown();
1355
1356 // True if the space has been set up but not torn down.
1357 bool HasBeenSetup() { return start_ != NULL; }
1358
1359 // Grow the size of the semispace by committing extra virtual memory.
1360 // Assumes that the caller has checked that the semispace has not reached
1361 // its maximum capacity (and thus there is space available in the reserved
1362 // address range to grow).
1363 bool Grow();
1364
1365 // Grow the semispace to the new capacity. The new capacity
1366 // requested must be larger than the current capacity.
1367 bool GrowTo(int new_capacity);
1368
1369 // Shrinks the semispace to the new capacity. The new capacity
1370 // requested must be more than the amount of used memory in the
1371 // semispace and less than the current capacity.
1372 bool ShrinkTo(int new_capacity);
1373
1374 // Returns the start address of the space.
1375 Address low() { return start_; }
1376 // Returns one past the end address of the space.
1377 Address high() { return low() + capacity_; }
1378
1379 // Age mark accessors.
1380 Address age_mark() { return age_mark_; }
1381 void set_age_mark(Address mark) { age_mark_ = mark; }
1382
1383 // True if the address is in the address range of this semispace (not
1384 // necessarily below the allocation pointer).
1385 bool Contains(Address a) {
1386 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1387 == reinterpret_cast<uintptr_t>(start_);
1388 }
1389
1390 // True if the object is a heap object in the address range of this
1391 // semispace (not necessarily below the allocation pointer).
1392 bool Contains(Object* o) {
1393 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1394 }
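
  // Worked example (hypothetical values): if this semispace's reserved range
  // starts at 0x20000000 with a maximum capacity of 2 MB, Setup() can derive
  // address_mask_ = ~(2 MB - 1) = 0xFFE00000. Then for a = 0x20012340,
  // (a & address_mask_) == 0x20000000 == start_, so Contains(a) is true,
  // while any address outside the 2 MB range fails the comparison. The
  // actual mask computation lives in SemiSpace::Setup (spaces.cc).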
1395
1396 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001397 int SpaceOffsetForAddress(Address addr) {
1398 return static_cast<int>(addr - low());
1399 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001400
  // If we don't have these here, then SemiSpace will be abstract. However,
  // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001403 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001404 UNREACHABLE();
1405 return 0;
1406 }
1407
Leon Clarkee46be812010-01-19 14:06:41 +00001408 virtual bool ReserveSpace(int bytes) {
1409 UNREACHABLE();
1410 return false;
1411 }
1412
Steve Blocka7e24c12009-10-30 11:49:00 +00001413 bool is_committed() { return committed_; }
1414 bool Commit();
1415 bool Uncommit();
1416
Steve Block6ded16b2010-05-10 14:33:55 +01001417#ifdef ENABLE_HEAP_PROTECTION
1418 // Protect/unprotect the space by marking it read-only/writable.
1419 virtual void Protect() {}
1420 virtual void Unprotect() {}
1421#endif
1422
Steve Blocka7e24c12009-10-30 11:49:00 +00001423#ifdef DEBUG
1424 virtual void Print();
1425 virtual void Verify();
1426#endif
1427
1428 // Returns the current capacity of the semi space.
1429 int Capacity() { return capacity_; }
1430
1431 // Returns the maximum capacity of the semi space.
1432 int MaximumCapacity() { return maximum_capacity_; }
1433
1434 // Returns the initial capacity of the semi space.
1435 int InitialCapacity() { return initial_capacity_; }
1436
1437 private:
1438 // The current and maximum capacity of the space.
1439 int capacity_;
1440 int maximum_capacity_;
1441 int initial_capacity_;
1442
1443 // The start address of the space.
1444 Address start_;
1445 // Used to govern object promotion during mark-compact collection.
1446 Address age_mark_;
1447
1448 // Masks and comparison values to test for containment in this semispace.
1449 uintptr_t address_mask_;
1450 uintptr_t object_mask_;
1451 uintptr_t object_expected_;
1452
1453 bool committed_;
1454
1455 public:
1456 TRACK_MEMORY("SemiSpace")
1457};
1458
1459
1460// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1461// semispace of the heap's new space. It iterates over the objects in the
1462// semispace from a given start address (defaulting to the bottom of the
1463// semispace) to the top of the semispace. New objects allocated after the
1464// iterator is created are not iterated.
1465class SemiSpaceIterator : public ObjectIterator {
1466 public:
1467 // Create an iterator over the objects in the given space. If no start
1468 // address is given, the iterator starts from the bottom of the space. If
1469 // no size function is given, the iterator calls Object::Size().
1470 explicit SemiSpaceIterator(NewSpace* space);
1471 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1472 SemiSpaceIterator(NewSpace* space, Address start);
1473
Steve Blocka7e24c12009-10-30 11:49:00 +00001474 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001475 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001476
1477 HeapObject* object = HeapObject::FromAddress(current_);
1478 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1479
1480 current_ += size;
1481 return object;
1482 }
1483
1484 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001485 virtual HeapObject* next_object() { return next(); }
1486
1487 private:
1488 void Initialize(NewSpace* space, Address start, Address end,
1489 HeapObjectCallback size_func);
1490
1491 // The semispace.
1492 SemiSpace* space_;
1493 // The current iteration point.
1494 Address current_;
1495 // The end of iteration.
1496 Address limit_;
1497 // The callback function.
1498 HeapObjectCallback size_func_;
1499};
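
// Illustrative sketch (assumes only the declarations above): walking every
// object currently in the active semispace, e.g. to gather statistics.
//
//   SemiSpaceIterator it(Heap::new_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     total_bytes += obj->Size();
//   }
//
// Heap::new_space() is the usual way to obtain the NewSpace instance; any
// NewSpace* works, and total_bytes is a local declared by the caller.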
1500
1501
1502// -----------------------------------------------------------------------------
1503// The young generation space.
1504//
1505// The new space consists of a contiguous pair of semispaces. It simply
1506// forwards most functions to the appropriate semispace.
1507
1508class NewSpace : public Space {
1509 public:
1510 // Constructor.
1511 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1512
1513 // Sets up the new space using the given chunk.
1514 bool Setup(Address start, int size);
1515
1516 // Tears down the space. Heap memory was not allocated by the space, so it
1517 // is not deallocated here.
1518 void TearDown();
1519
1520 // True if the space has been set up but not torn down.
1521 bool HasBeenSetup() {
1522 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1523 }
1524
1525 // Flip the pair of spaces.
1526 void Flip();
1527
1528 // Grow the capacity of the semispaces. Assumes that they are not at
1529 // their maximum capacity.
1530 void Grow();
1531
1532 // Shrink the capacity of the semispaces.
1533 void Shrink();
1534
1535 // True if the address or object lies in the address range of either
1536 // semispace (not necessarily below the allocation pointer).
1537 bool Contains(Address a) {
1538 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1539 == reinterpret_cast<uintptr_t>(start_);
1540 }
1541 bool Contains(Object* o) {
1542 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1543 }
1544
1545 // Return the allocated bytes in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001546 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
  // The same, but returning an int. We have to have the one that returns
  // intptr_t because it is inherited, but if we know we are dealing with the
  // new space, which cannot get as big as the other spaces, then this is
  // useful:
  int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001551
Steve Blocka7e24c12009-10-30 11:49:00 +00001552 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001553 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001554 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1555 return to_space_.Capacity();
1556 }
Steve Block3ce2e202009-11-05 08:53:23 +00001557
1558 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001559 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00001560 if (from_space_.is_committed()) return 2 * Capacity();
1561 return Capacity();
1562 }
1563
Steve Blocka7e24c12009-10-30 11:49:00 +00001564 // Return the available bytes without growing in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001565 intptr_t Available() { return Capacity() - Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001566
1567 // Return the maximum capacity of a semispace.
1568 int MaximumCapacity() {
1569 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1570 return to_space_.MaximumCapacity();
1571 }
1572
1573 // Returns the initial capacity of a semispace.
1574 int InitialCapacity() {
1575 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1576 return to_space_.InitialCapacity();
1577 }
1578
1579 // Return the address of the allocation pointer in the active semispace.
1580 Address top() { return allocation_info_.top; }
1581 // Return the address of the first object in the active semispace.
1582 Address bottom() { return to_space_.low(); }
1583
1584 // Get the age mark of the inactive semispace.
1585 Address age_mark() { return from_space_.age_mark(); }
1586 // Set the age mark in the active semispace.
1587 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1588
  // The start address of the space and a bit mask. ANDing an address in the
1590 // new space with the mask will result in the start address.
1591 Address start() { return start_; }
1592 uintptr_t mask() { return address_mask_; }
1593
1594 // The allocation top and limit addresses.
1595 Address* allocation_top_address() { return &allocation_info_.top; }
1596 Address* allocation_limit_address() { return &allocation_info_.limit; }
1597
John Reck59135872010-11-02 12:39:01 -07001598 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001599 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1600 }
1601
1602 // Allocate the requested number of bytes for relocation during mark-compact
1603 // collection.
John Reck59135872010-11-02 12:39:01 -07001604 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001605 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1606 }
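
  // Illustrative sketch of the linear (bump-pointer) allocation performed by
  // AllocateRawInternal, assuming the AllocationInfo fields used above; the
  // real implementation (spaces-inl.h) also covers the failure path:
  //
  //   Address new_top = alloc_info->top + size_in_bytes;
  //   if (new_top > alloc_info->limit) return Failure::RetryAfterGC(...);
  //   HeapObject* obj = HeapObject::FromAddress(alloc_info->top);
  //   alloc_info->top = new_top;
  //   return obj;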
1607
1608 // Reset the allocation pointer to the beginning of the active semispace.
1609 void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
1611 // preparation for mark-compact collection.
1612 void MCResetRelocationInfo();
1613 // Update the allocation pointer in the active semispace after a
1614 // mark-compact collection.
1615 void MCCommitRelocationInfo();
1616
1617 // Get the extent of the inactive semispace (for use as a marking stack).
1618 Address FromSpaceLow() { return from_space_.low(); }
1619 Address FromSpaceHigh() { return from_space_.high(); }
1620
1621 // Get the extent of the active semispace (to sweep newly copied objects
1622 // during a scavenge collection).
1623 Address ToSpaceLow() { return to_space_.low(); }
1624 Address ToSpaceHigh() { return to_space_.high(); }
1625
1626 // Offsets from the beginning of the semispaces.
1627 int ToSpaceOffsetForAddress(Address a) {
1628 return to_space_.SpaceOffsetForAddress(a);
1629 }
1630 int FromSpaceOffsetForAddress(Address a) {
1631 return from_space_.SpaceOffsetForAddress(a);
1632 }
1633
1634 // True if the object is a heap object in the address range of the
1635 // respective semispace (not necessarily below the allocation pointer of the
1636 // semispace).
1637 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1638 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1639
1640 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1641 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1642
Leon Clarkee46be812010-01-19 14:06:41 +00001643 virtual bool ReserveSpace(int bytes);
1644
Ben Murdochb0fe1622011-05-05 13:52:32 +01001645 // Resizes a sequential string which must be the most recent thing that was
1646 // allocated in new space.
1647 template <typename StringType>
1648 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
1649
Steve Blocka7e24c12009-10-30 11:49:00 +00001650#ifdef ENABLE_HEAP_PROTECTION
1651 // Protect/unprotect the space by marking it read-only/writable.
1652 virtual void Protect();
1653 virtual void Unprotect();
1654#endif
1655
1656#ifdef DEBUG
1657 // Verify the active semispace.
1658 virtual void Verify();
1659 // Print the active semispace.
1660 virtual void Print() { to_space_.Print(); }
1661#endif
1662
1663#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1664 // Iterates the active semispace to collect statistics.
1665 void CollectStatistics();
1666 // Reports previously collected statistics of the active semispace.
1667 void ReportStatistics();
1668 // Clears previously collected statistics.
1669 void ClearHistograms();
1670
1671 // Record the allocation or promotion of a heap object. Note that we don't
1672 // record every single allocation, but only those that happen in the
1673 // to space during a scavenge GC.
1674 void RecordAllocation(HeapObject* obj);
1675 void RecordPromotion(HeapObject* obj);
1676#endif
1677
  // Return whether the operation succeeded.
1679 bool CommitFromSpaceIfNeeded() {
1680 if (from_space_.is_committed()) return true;
1681 return from_space_.Commit();
1682 }
1683
1684 bool UncommitFromSpace() {
1685 if (!from_space_.is_committed()) return true;
1686 return from_space_.Uncommit();
1687 }
1688
1689 private:
1690 // The semispaces.
1691 SemiSpace to_space_;
1692 SemiSpace from_space_;
1693
1694 // Start address and bit mask for containment testing.
1695 Address start_;
1696 uintptr_t address_mask_;
1697 uintptr_t object_mask_;
1698 uintptr_t object_expected_;
1699
1700 // Allocation pointer and limit for normal allocation and allocation during
1701 // mark-compact collection.
1702 AllocationInfo allocation_info_;
1703 AllocationInfo mc_forwarding_info_;
1704
1705#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1706 HistogramInfo* allocated_histogram_;
1707 HistogramInfo* promoted_histogram_;
1708#endif
1709
1710 // Implementation of AllocateRaw and MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001711 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1712 int size_in_bytes,
1713 AllocationInfo* alloc_info);
Steve Blocka7e24c12009-10-30 11:49:00 +00001714
1715 friend class SemiSpaceIterator;
1716
1717 public:
1718 TRACK_MEMORY("NewSpace")
1719};
1720
1721
1722// -----------------------------------------------------------------------------
1723// Free lists for old object spaces
1724//
1725// Free-list nodes are free blocks in the heap. They look like heap objects
1726// (free-list node pointers have the heap object tag, and they have a map like
1727// a heap object). They have a size and a next pointer. The next pointer is
1728// the raw address of the next free list node (or NULL).
1729class FreeListNode: public HeapObject {
1730 public:
1731 // Obtain a free-list node from a raw address. This is not a cast because
1732 // it does not check nor require that the first word at the address is a map
1733 // pointer.
1734 static FreeListNode* FromAddress(Address address) {
1735 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1736 }
1737
Steve Block3ce2e202009-11-05 08:53:23 +00001738 static inline bool IsFreeListNode(HeapObject* object);
1739
Steve Blocka7e24c12009-10-30 11:49:00 +00001740 // Set the size in bytes, which can be read with HeapObject::Size(). This
1741 // function also writes a map to the first word of the block so that it
1742 // looks like a heap object to the garbage collector and heap iteration
1743 // functions.
1744 void set_size(int size_in_bytes);
1745
1746 // Accessors for the next field.
1747 inline Address next();
1748 inline void set_next(Address next);
1749
1750 private:
1751 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1752
1753 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1754};
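
// Illustrative sketch (assumes only the FreeListNode declarations above):
// turning a freed block of raw memory into something that looks like a heap
// object, so heap iteration can step over it, and linking it into a list.
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(size_in_bytes);  // also writes a map, so Size() works
//   node->set_next(head_);          // head_ is the owning list's head
//   head_ = start;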
1755
1756
1757// The free list for the old space.
1758class OldSpaceFreeList BASE_EMBEDDED {
1759 public:
1760 explicit OldSpaceFreeList(AllocationSpace owner);
1761
1762 // Clear the free list.
1763 void Reset();
1764
1765 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001766 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001767
1768 // Place a node on the free list. The block of size 'size_in_bytes'
1769 // starting at 'start' is placed on the free list. The return value is the
1770 // number of bytes that have been lost due to internal fragmentation by
1771 // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
1773 // aligned, and the size should be a non-zero multiple of the word size.
1774 int Free(Address start, int size_in_bytes);
1775
1776 // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
1778 // number of bytes lost to fragmentation is returned in the output parameter
1779 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
John Reck59135872010-11-02 12:39:01 -07001780 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
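
  // Illustrative sketch (hypothetical sizes, assumes only the declarations
  // above): returning a block to the list and allocating from it again.
  //
  //   int waste = free_list->Free(start, 64);          // usually returns 0
  //   int wasted_bytes;
  //   Object* block;
  //   { MaybeObject* maybe = free_list->Allocate(48, &wasted_bytes);
  //     if (!maybe->ToObject(&block)) return maybe;    // no suitable block
  //   }
  //   // The 16 surplus bytes are either re-freed or reported in
  //   // wasted_bytes, depending on whether they can form a valid block.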
Steve Blocka7e24c12009-10-30 11:49:00 +00001781
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001782 void MarkNodes();
1783
Steve Blocka7e24c12009-10-30 11:49:00 +00001784 private:
1785 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1786 // will always result in waste.)
1787 static const int kMinBlockSize = 2 * kPointerSize;
1788 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1789
1790 // The identity of the owning space, for building allocation Failure
1791 // objects.
1792 AllocationSpace owner_;
1793
1794 // Total available bytes in all blocks on this free list.
1795 int available_;
1796
1797 // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in a list ordered by increasing size. Entries
1799 // corresponding to sizes < kMinBlockSize always have an empty free list
1800 // (but index kHead is used for the head of the size list).
1801 struct SizeNode {
1802 // Address of the head FreeListNode of the implied block size or NULL.
1803 Address head_node_;
1804 // Size (words) of the next larger available size if head_node_ != NULL.
1805 int next_size_;
1806 };
1807 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1808 SizeNode free_[kFreeListsLength];
1809
1810 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1811 static const int kHead = kMinBlockSize / kPointerSize - 1;
1812 static const int kEnd = kMaxInt;
1813
1814 // We keep a "finger" in the size list to speed up a common pattern:
1815 // repeated requests for the same or increasing sizes.
1816 int finger_;
1817
1818 // Starting from *prev, find and return the smallest size >= index (words),
1819 // or kEnd. Update *prev to be the largest size < index, or kHead.
1820 int FindSize(int index, int* prev) {
1821 int cur = free_[*prev].next_size_;
1822 while (cur < index) {
1823 *prev = cur;
1824 cur = free_[cur].next_size_;
1825 }
1826 return cur;
1827 }
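
  // Worked example (hypothetical sizes): with free blocks of 4, 10 and 64
  // words, the size list is kHead -> 4 -> 10 -> 64 -> kEnd. For a request of
  // 8 words, FindSize(8, &prev) with prev == kHead steps past 4 (4 < 8) and
  // returns 10 with *prev == 4, so the allocation is cut out of a 10-word
  // block. A later request of 12 words can start the walk from finger_
  // instead of kHead, which is what makes repeated requests for the same or
  // increasing sizes cheap.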
1828
1829 // Remove an existing element from the size list.
1830 void RemoveSize(int index) {
1831 int prev = kHead;
1832 int cur = FindSize(index, &prev);
1833 ASSERT(cur == index);
1834 free_[prev].next_size_ = free_[cur].next_size_;
1835 finger_ = prev;
1836 }
1837
1838 // Insert a new element into the size list.
1839 void InsertSize(int index) {
1840 int prev = kHead;
1841 int cur = FindSize(index, &prev);
1842 ASSERT(cur != index);
1843 free_[prev].next_size_ = index;
1844 free_[index].next_size_ = cur;
1845 }
1846
1847 // The size list is not updated during a sequence of calls to Free, but is
1848 // rebuilt before the next allocation.
1849 void RebuildSizeList();
1850 bool needs_rebuild_;
1851
1852#ifdef DEBUG
1853 // Does this free list contain a free block located at the address of 'node'?
1854 bool Contains(FreeListNode* node);
1855#endif
1856
1857 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1858};
1859
1860
1861// The free list for the map space.
1862class FixedSizeFreeList BASE_EMBEDDED {
1863 public:
1864 FixedSizeFreeList(AllocationSpace owner, int object_size);
1865
1866 // Clear the free list.
1867 void Reset();
1868
1869 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001870 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001871
1872 // Place a node on the free list. The block starting at 'start' (assumed to
1873 // have size object_size_) is placed on the free list. Bookkeeping
  // information will be written to the block, i.e., its contents will be
1875 // destroyed. The start address should be word aligned.
1876 void Free(Address start);
1877
  // Allocate a fixed-sized block from the free list. The block is uninitialized.
1879 // A failure is returned if no block is available.
John Reck59135872010-11-02 12:39:01 -07001880 MUST_USE_RESULT MaybeObject* Allocate();
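
  // Illustrative sketch (assumes only the declarations above): spaces with
  // fixed-size objects, such as the map space, recycle blocks through this
  // list.
  //
  //   free_list->Free(block_address);                  // size is implicit
  //   Object* block;
  //   { MaybeObject* maybe = free_list->Allocate();
  //     if (!maybe->ToObject(&block)) return maybe;    // list was empty
  //   }
  //   // The caller reinitializes the returned block as a proper object.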
Steve Blocka7e24c12009-10-30 11:49:00 +00001881
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001882 void MarkNodes();
1883
Steve Blocka7e24c12009-10-30 11:49:00 +00001884 private:
1885 // Available bytes on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001886 intptr_t available_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001887
1888 // The head of the free list.
1889 Address head_;
1890
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001891 // The tail of the free list.
1892 Address tail_;
1893
Steve Blocka7e24c12009-10-30 11:49:00 +00001894 // The identity of the owning space, for building allocation Failure
1895 // objects.
1896 AllocationSpace owner_;
1897
1898 // The size of the objects in this space.
1899 int object_size_;
1900
1901 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1902};
1903
1904
1905// -----------------------------------------------------------------------------
1906// Old object space (excluding map objects)
1907
1908class OldSpace : public PagedSpace {
1909 public:
1910 // Creates an old space object with a given maximum capacity.
1911 // The constructor does not allocate pages from OS.
Ben Murdochf87a2032010-10-22 12:50:53 +01001912 explicit OldSpace(intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001913 AllocationSpace id,
1914 Executability executable)
1915 : PagedSpace(max_capacity, id, executable), free_list_(id) {
1916 page_extra_ = 0;
1917 }
1918
1919 // The bytes available on the free list (ie, not above the linear allocation
1920 // pointer).
Ben Murdochf87a2032010-10-22 12:50:53 +01001921 intptr_t AvailableFree() { return free_list_.available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001922
Steve Block6ded16b2010-05-10 14:33:55 +01001923 // The limit of allocation for a page in this space.
1924 virtual Address PageAllocationLimit(Page* page) {
1925 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001926 }
1927
1928 // Give a block of memory to the space's free list. It might be added to
1929 // the free list or accounted as waste.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the area to the free list.
1932 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001933 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001934
1935 if (add_to_freelist) {
1936 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1937 accounting_stats_.WasteBytes(wasted_bytes);
1938 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001939 }
1940
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001941 virtual void DeallocateBlock(Address start,
1942 int size_in_bytes,
1943 bool add_to_freelist);
1944
Steve Blocka7e24c12009-10-30 11:49:00 +00001945 // Prepare for full garbage collection. Resets the relocation pointer and
1946 // clears the free list.
1947 virtual void PrepareForMarkCompact(bool will_compact);
1948
1949 // Updates the allocation pointer to the relocation top after a mark-compact
1950 // collection.
1951 virtual void MCCommitRelocationInfo();
1952
Leon Clarkee46be812010-01-19 14:06:41 +00001953 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1954
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001955 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1956
Steve Blocka7e24c12009-10-30 11:49:00 +00001957#ifdef DEBUG
1958 // Reports statistics for the space
1959 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001960#endif
1961
1962 protected:
1963 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001964 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001965
1966 // Virtual function in the superclass. Allocate linearly at the start of
1967 // the page after current_page (there is assumed to be one).
1968 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1969
1970 private:
1971 // The space's free list.
1972 OldSpaceFreeList free_list_;
1973
1974 public:
1975 TRACK_MEMORY("OldSpace")
1976};
1977
1978
1979// -----------------------------------------------------------------------------
1980// Old space for objects of a fixed size
1981
1982class FixedSpace : public PagedSpace {
1983 public:
Ben Murdochf87a2032010-10-22 12:50:53 +01001984 FixedSpace(intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001985 AllocationSpace id,
1986 int object_size_in_bytes,
1987 const char* name)
1988 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1989 object_size_in_bytes_(object_size_in_bytes),
1990 name_(name),
1991 free_list_(id, object_size_in_bytes) {
1992 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1993 }
1994
Steve Block6ded16b2010-05-10 14:33:55 +01001995 // The limit of allocation for a page in this space.
1996 virtual Address PageAllocationLimit(Page* page) {
1997 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001998 }
1999
2000 int object_size_in_bytes() { return object_size_in_bytes_; }
2001
2002 // Give a fixed sized block of memory to the space's free list.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the area to the free list.
2005 void Free(Address start, bool add_to_freelist) {
2006 if (add_to_freelist) {
2007 free_list_.Free(start);
2008 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002009 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
2010 }
2011
2012 // Prepares for a mark-compact GC.
2013 virtual void PrepareForMarkCompact(bool will_compact);
2014
2015 // Updates the allocation pointer to the relocation top after a mark-compact
2016 // collection.
2017 virtual void MCCommitRelocationInfo();
2018
Leon Clarkee46be812010-01-19 14:06:41 +00002019 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
2020
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002021 virtual void DeallocateBlock(Address start,
2022 int size_in_bytes,
2023 bool add_to_freelist);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002024
2025 void MarkFreeListNodes() { free_list_.MarkNodes(); }
2026
Steve Blocka7e24c12009-10-30 11:49:00 +00002027#ifdef DEBUG
2028 // Reports statistic info of the space
2029 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002030#endif
2031
2032 protected:
2033 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07002034 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002035
2036 // Virtual function in the superclass. Allocate linearly at the start of
2037 // the page after current_page (there is assumed to be one).
2038 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
2039
Leon Clarkee46be812010-01-19 14:06:41 +00002040 void ResetFreeList() {
2041 free_list_.Reset();
2042 }
2043
Steve Blocka7e24c12009-10-30 11:49:00 +00002044 private:
2045 // The size of objects in this space.
2046 int object_size_in_bytes_;
2047
2048 // The name of this space.
2049 const char* name_;
2050
2051 // The space's free list.
2052 FixedSizeFreeList free_list_;
2053};
2054
2055
2056// -----------------------------------------------------------------------------
2057// Old space for all map objects
2058
2059class MapSpace : public FixedSpace {
2060 public:
2061 // Creates a map space object with a maximum capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01002062 MapSpace(intptr_t max_capacity, int max_map_space_pages, AllocationSpace id)
Leon Clarked91b9f72010-01-27 17:25:45 +00002063 : FixedSpace(max_capacity, id, Map::kSize, "map"),
2064 max_map_space_pages_(max_map_space_pages) {
2065 ASSERT(max_map_space_pages < kMaxMapPageIndex);
2066 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002067
2068 // Prepares for a mark-compact GC.
2069 virtual void PrepareForMarkCompact(bool will_compact);
2070
2071 // Given an index, returns the page address.
2072 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2073
Leon Clarked91b9f72010-01-27 17:25:45 +00002074 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00002075
Leon Clarkee46be812010-01-19 14:06:41 +00002076 // Are map pointers encodable into map word?
2077 bool MapPointersEncodable() {
2078 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002079 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00002080 return true;
2081 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002082 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002083 }
2084
2085 // Should be called after forced sweep to find out if map space needs
2086 // compaction.
2087 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002088 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00002089 }
2090
2091 Address TopAfterCompaction(int live_maps) {
2092 ASSERT(NeedsCompaction(live_maps));
2093
2094 int pages_left = live_maps / kMapsPerPage;
2095 PageIterator it(this, PageIterator::ALL_PAGES);
2096 while (pages_left-- > 0) {
2097 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002098 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002099 }
2100 ASSERT(it.has_next());
2101 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002102 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002103 ASSERT(top_page->is_valid());
2104
2105 int offset = live_maps % kMapsPerPage * Map::kSize;
2106 Address top = top_page->ObjectAreaStart() + offset;
2107 ASSERT(top < top_page->ObjectAreaEnd());
2108 ASSERT(Contains(top));
2109
2110 return top;
2111 }
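
  // Worked example (hypothetical numbers): with an 8K object area and
  // Map::kSize == 88, kMapsPerPage is 8192 / 88 = 93. For live_maps == 200,
  // TopAfterCompaction() keeps 200 / 93 = 2 full pages and returns the third
  // page's ObjectAreaStart() plus (200 % 93) * 88 = 14 * 88 = 1232 bytes,
  // i.e. the address just past the last relocated map.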
2112
2113 void FinishCompaction(Address new_top, int live_maps) {
2114 Page* top_page = Page::FromAddress(new_top);
2115 ASSERT(top_page->is_valid());
2116
2117 SetAllocationInfo(&allocation_info_, top_page);
2118 allocation_info_.top = new_top;
2119
2120 int new_size = live_maps * Map::kSize;
2121 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2122 accounting_stats_.AllocateBytes(new_size);
2123
2124#ifdef DEBUG
2125 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002126 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00002127 for (Page* p = first_page_; p != top_page; p = p->next_page())
2128 actual_size += kMapsPerPage * Map::kSize;
2129 actual_size += (new_top - top_page->ObjectAreaStart());
2130 ASSERT(accounting_stats_.Size() == actual_size);
2131 }
2132#endif
2133
2134 Shrink();
2135 ResetFreeList();
2136 }
2137
Steve Blocka7e24c12009-10-30 11:49:00 +00002138 protected:
2139#ifdef DEBUG
2140 virtual void VerifyObject(HeapObject* obj);
2141#endif
2142
2143 private:
Leon Clarkee46be812010-01-19 14:06:41 +00002144 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2145
2146 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002147 int CompactionThreshold() {
2148 return kMapsPerPage * (max_map_space_pages_ - 1);
2149 }
2150
2151 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002152
Steve Blocka7e24c12009-10-30 11:49:00 +00002153 // An array of page start address in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002154 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002155
2156 public:
2157 TRACK_MEMORY("MapSpace")
2158};
2159
2160
2161// -----------------------------------------------------------------------------
2162// Old space for all global object property cell objects
2163
2164class CellSpace : public FixedSpace {
2165 public:
2166 // Creates a property cell space object with a maximum capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01002167 CellSpace(intptr_t max_capacity, AllocationSpace id)
Steve Blocka7e24c12009-10-30 11:49:00 +00002168 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2169
2170 protected:
2171#ifdef DEBUG
2172 virtual void VerifyObject(HeapObject* obj);
2173#endif
2174
2175 public:
2176 TRACK_MEMORY("CellSpace")
2177};
2178
2179
2180// -----------------------------------------------------------------------------
2181// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset into its page.
2185// Large objects do not move during garbage collections.
2186
2187// A LargeObjectChunk holds exactly one large object page with exactly one
2188// large object.
2189class LargeObjectChunk {
2190 public:
2191 // Allocates a new LargeObjectChunk that contains a large object page
2192 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002193 // object) bytes after the object area start of that page.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002194 static LargeObjectChunk* New(int size_in_bytes, Executability executable);
2195
2196 // Free the memory associated with the chunk.
2197 inline void Free(Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002198
2199 // Interpret a raw address as a large object chunk.
2200 static LargeObjectChunk* FromAddress(Address address) {
2201 return reinterpret_cast<LargeObjectChunk*>(address);
2202 }
2203
2204 // Returns the address of this chunk.
2205 Address address() { return reinterpret_cast<Address>(this); }
2206
2207 // Accessors for the fields of the chunk.
2208 LargeObjectChunk* next() { return next_; }
2209 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
Steve Block791712a2010-08-27 10:21:07 +01002210 size_t size() { return size_ & ~Page::kPageFlagMask; }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002211
2212 // Compute the start address in the chunk.
2213 inline Address GetStartAddress();
Steve Blocka7e24c12009-10-30 11:49:00 +00002214
2215 // Returns the object in this chunk.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002216 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002217
  // Given a requested size, returns the physical size of a chunk to be
  // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002220 static int ChunkSizeFor(int size_in_bytes);
2221
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002222 // Given a chunk size, returns the object size it can accommodate. Used by
2223 // LargeObjectSpace::Available.
Ben Murdochf87a2032010-10-22 12:50:53 +01002224 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002225 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2226 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2227 }
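
  // Worked example (hypothetical numbers, 8K pages): with an object start
  // offset of 256 bytes, ObjectSizeFor(65536) is 65536 - 8192 - 256 = 57088,
  // the largest object such a chunk can hold. ChunkSizeFor goes the other
  // way, rounding a requested object size up to the smallest chunk that
  // satisfies this relation.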
2228
2229 private:
2230 // A pointer to the next large object chunk in the space or NULL.
2231 LargeObjectChunk* next_;
2232
Ben Murdochb0fe1622011-05-05 13:52:32 +01002233 // The total size of this chunk.
Steve Blocka7e24c12009-10-30 11:49:00 +00002234 size_t size_;
2235
2236 public:
2237 TRACK_MEMORY("LargeObjectChunk")
2238};
2239
2240
2241class LargeObjectSpace : public Space {
2242 public:
2243 explicit LargeObjectSpace(AllocationSpace id);
2244 virtual ~LargeObjectSpace() {}
2245
2246 // Initializes internal data structures.
2247 bool Setup();
2248
2249 // Releases internal resources, frees objects in this space.
2250 void TearDown();
2251
2252 // Allocates a (non-FixedArray, non-Code) large object.
John Reck59135872010-11-02 12:39:01 -07002253 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002254 // Allocates a large Code object.
John Reck59135872010-11-02 12:39:01 -07002255 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002256 // Allocates a large FixedArray.
John Reck59135872010-11-02 12:39:01 -07002257 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002258
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002259 // Available bytes for objects in this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01002260 intptr_t Available() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002261 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2262 }
2263
Ben Murdochf87a2032010-10-22 12:50:53 +01002264 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002265 return size_;
2266 }
2267
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002268 virtual intptr_t SizeOfObjects() {
2269 return objects_size_;
2270 }
2271
Steve Blocka7e24c12009-10-30 11:49:00 +00002272 int PageCount() {
2273 return page_count_;
2274 }
2275
  // Finds an object for a given address, returning Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space and may be slow.
John Reck59135872010-11-02 12:39:01 -07002279 MaybeObject* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002280
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002281 // Finds a large object page containing the given pc, returns NULL
2282 // if such a page doesn't exist.
2283 LargeObjectChunk* FindChunkContainingPc(Address pc);
2284
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002285 // Iterates objects covered by dirty regions.
2286 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002287
2288 // Frees unmarked objects.
2289 void FreeUnmarkedObjects();
2290
2291 // Checks whether a heap object is in this space; O(1).
2292 bool Contains(HeapObject* obj);
2293
2294 // Checks whether the space is empty.
2295 bool IsEmpty() { return first_chunk_ == NULL; }
2296
Leon Clarkee46be812010-01-19 14:06:41 +00002297 // See the comments for ReserveSpace in the Space class. This has to be
2298 // called after ReserveSpace has been called on the paged spaces, since they
2299 // may use some memory, leaving less for large objects.
2300 virtual bool ReserveSpace(int bytes);
2301
Steve Blocka7e24c12009-10-30 11:49:00 +00002302#ifdef ENABLE_HEAP_PROTECTION
2303 // Protect/unprotect the space by marking it read-only/writable.
2304 void Protect();
2305 void Unprotect();
2306#endif
2307
2308#ifdef DEBUG
2309 virtual void Verify();
2310 virtual void Print();
2311 void ReportStatistics();
2312 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002313#endif
2314 // Checks whether an address is in the object area in this space. It
2315 // iterates all objects in the space. May be slow.
2316 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2317
2318 private:
2319 // The head of the linked list of large object chunks.
2320 LargeObjectChunk* first_chunk_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002321 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002322 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002323 intptr_t objects_size_; // size of objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002324
2325 // Shared implementation of AllocateRaw, AllocateRawCode and
2326 // AllocateRawFixedArray.
John Reck59135872010-11-02 12:39:01 -07002327 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2328 int object_size,
2329 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002330
Steve Blocka7e24c12009-10-30 11:49:00 +00002331 friend class LargeObjectIterator;
2332
2333 public:
2334 TRACK_MEMORY("LargeObjectSpace")
2335};
2336
2337
2338class LargeObjectIterator: public ObjectIterator {
2339 public:
2340 explicit LargeObjectIterator(LargeObjectSpace* space);
2341 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2342
Steve Blocka7e24c12009-10-30 11:49:00 +00002343 HeapObject* next();
2344
2345 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002346 virtual HeapObject* next_object() { return next(); }
2347
2348 private:
2349 LargeObjectChunk* current_;
2350 HeapObjectCallback size_func_;
2351};
2352
2353
2354} } // namespace v8::internal
2355
2356#endif // V8_SPACES_H_