// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces each consist of a list of pages. A page has a page header and an
// object area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bits encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every pointer-
// aligned word which satisfies the Heap::InNewSpace() predicate must be a
// pointer to a live heap object in new space. Thus objects in old pointer
// and large object spaces should have a special layout (e.g. no bare integer
// fields). This requirement does not apply to the map space, which is iterated
// in a special fashion. However, we still require pointer fields of dead maps
// to be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page should reside under
// the allocation watermark. During scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark into a special field in the page header and set
// a page WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//
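// As an illustration only (not part of the original comment), the invariant
// above means that visiting a dirty region of an old pointer space page can
// treat every pointer-aligned word as a potential new space pointer. A
// minimal sketch, assuming a Memory::Object_at()-style slot accessor (that
// accessor is an assumption, not something declared in this header):
//
//   for (Address slot = region_start; slot < region_end; slot += kPointerSize) {
//     Object* value = Memory::Object_at(slot);   // read the word as an Object*
//     if (Heap::InNewSpace(value)) {
//       // Guaranteed to be a live new space object; follow or update the slot.
//     }
//   }
//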

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                            \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize size which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize;

  // The start offset of the object area in a page.
  static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
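
  // Purely illustrative (the authoritative encoding is documented in the
  // GetRegionNumberForAddress() method body, implemented elsewhere): with an
  // 8K page and 32 dirty bits, each bit covers kPageSize / kBitsPerInt =
  // 8192 / 32 = 256 bytes, so for a normal page one would expect roughly
  //
  //   int region   = (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
  //   uint32_t bit = 1u << region;  // the mask behind MarkRegionDirty()/IsRegionDirty()
  //
  // Large object pages are the exception: several regions may share one bit.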

  enum PageFlag {
    IS_NORMAL_PAGE = 1 << 0,
    WAS_IN_USE_BEFORE_MC = 1 << 1,

    // Page allocation watermark was bumped by preallocation during scavenge.
    // The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED = 1 << 2
  };

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. Then we flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge, and each page becomes marked
  // as having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  //   If a page is in use then it is marked as having an invalid watermark at
  //   the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag();

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = 3;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
          kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);
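
  // Illustration only (a sketch, not the authoritative code): the constants
  // above suggest that the allocation watermark offset and the page flags
  // share the single flags_ word roughly as
  //
  //   uint32_t watermark_offset = (flags_ & kAllocationWatermarkOffsetMask)
  //                                   >> kAllocationWatermarkOffsetShift;
  //   uint32_t page_flags       = flags_ & kFlagsMask;
  //
  // leaving the kAllocationWatermarkOffsetShift (= 3) low bits for PageFlag
  // values.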

  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
  // Instead of clearing this flag from all pages we just flip
  // its meaning at the beginning of a scavenge.
  static intptr_t watermark_invalidated_mark_;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding address
  // of the first live object in this page.
  // During scavenge collections this field is used to store the allocation
  // watermark if it is altered during the scavenge.
  Address mc_first_forwarded;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual int Size() = 0;

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if the space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// is expanded by allocating kPagesPerChunk pages at a time, except possibly
// the last expansion (before running out of space). The first chunk may
// contain fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//
//

class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see comment for class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by the given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf, size_t length);

  // Returns the maximum available bytes of heaps.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns the allocated space size in bytes.
  static int Size() { return size_; }

  // Returns the maximum available bytes that the old space can have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by a space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
#else
  static const int kPagesPerChunk = 16;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // Current iteration point.
  Address end_addr_;              // End iteration point.
  Address cur_limit_;             // Current page limit.
  HeapObjectCallback size_func_;  // Size function.
  Page* end_page_;                // Caches the page of the end address.

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next(), rolls over into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
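
// A minimal usage sketch (illustrative only; it assumes that next() returns
// NULL once the space is fully traversed, which is what the FromNextPage()
// slow path is expected to do):
//
//   HeapObjectIterator it(space);  // 'space' is some set-up PagedSpace*.
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }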


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};
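
// For illustration only, a typical traversal over every page of a space could
// look like the following sketch ('space' is assumed to be a set-up
// PagedSpace*):
//
//   PageIterator it(space, PageIterator::ALL_PAGES);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... inspect p ...
//   }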


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via the
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};
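
// A minimal sketch of the linear allocation fast path that an AllocationInfo
// supports (illustrative only; the real fast path is the space-specific
// AllocateLinearly()/AllocateRaw() machinery declared further below):
//
//   Address new_top = alloc_info->top + size_in_bytes;
//   if (new_top > alloc_info->limit) return NULL;  // caller takes the slow path
//   HeapObject* obj = HeapObject::FromAddress(alloc_info->top);
//   alloc_info->top = new_top;
//   return obj;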


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes; the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space); and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  int Capacity() { return capacity_; }
  int Available() { return available_; }
  int Size() { return size_; }
  int Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;
  int available_;
  int size_;
  int waste_;
};
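
// Illustrative only: the operations above preserve the identity
// Capacity() == Size() + Waste() + Available(). For example (a sketch):
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity = available = area size
//   stats.AllocateBytes(128);                  // available -> size
//   stats.WasteBytes(32);                      // available -> waste
//   // stats.Capacity() == stats.Size() + stats.Waste() + stats.Available()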


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space: frees all pages in this space except those belonging
  // to the initial chunk, and uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  int CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  inline Object* MCAllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Used by ReserveSpace.
  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;

  // Free all pages in range from prev (exclusive) to last (inclusive).
  // Freed pages are moved to the end of the page list.
  void FreePages(Page* prev, Page* last);

  // Set space allocation info.
  void SetTop(Address top) {
    allocation_info_.top = top;
    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
  }

  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->
        SetAllocationWatermark(mc_forwarding_info_.top);
  }

  // Computes the offset of a given address in this space from the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space. Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // True if pages owned by this space are linked in chunk-order.
  // See the comment for class MemoryAllocator for the definition of
  // chunk-order.
  bool page_list_is_chunk_ordered_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated. Possibly non-zero
  // for pages in spaces with only fixed-size objects. Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets the allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  int CountPagesToTop() {
    Page* p = Page::FromAllocationTop(allocation_info_.top);
    PageIterator it(this, PageIterator::ALL_PAGES);
    int counter = 1;
    while (it.has_next()) {
      if (it.next() == p) return counter;
      counter++;
    }
    UNREACHABLE();
    return -1;
  }

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended to the last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'. Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there. This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw. This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Returns the total number of pages in this space.
  int CountTotalPages();
#endif

 private:
  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

  friend class PageIterator;
};


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};
#endif

1209
1210// -----------------------------------------------------------------------------
1211// SemiSpace in young generation
1212//
1213// A semispace is a contiguous chunk of memory. The mark-compact collector
1214// uses the memory in the from space as a marking stack when tracing live
1215// objects.
1216
1217class SemiSpace : public Space {
1218 public:
1219 // Constructor.
1220 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1221 start_ = NULL;
1222 age_mark_ = NULL;
1223 }
1224
1225 // Sets up the semispace using the given chunk.
1226 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1227
1228 // Tear down the space. Heap memory was not allocated by the space, so it
1229 // is not deallocated here.
1230 void TearDown();
1231
1232 // True if the space has been set up but not torn down.
1233 bool HasBeenSetup() { return start_ != NULL; }
1234
1235 // Grow the size of the semispace by committing extra virtual memory.
1236 // Assumes that the caller has checked that the semispace has not reached
1237 // its maximum capacity (and thus there is space available in the reserved
1238 // address range to grow).
1239 bool Grow();
1240
1241 // Grow the semispace to the new capacity. The new capacity
1242 // requested must be larger than the current capacity.
1243 bool GrowTo(int new_capacity);
1244
1245 // Shrinks the semispace to the new capacity. The new capacity
1246 // requested must be more than the amount of used memory in the
1247 // semispace and less than the current capacity.
1248 bool ShrinkTo(int new_capacity);
1249
1250 // Returns the start address of the space.
1251 Address low() { return start_; }
1252 // Returns one past the end address of the space.
1253 Address high() { return low() + capacity_; }
1254
1255 // Age mark accessors.
1256 Address age_mark() { return age_mark_; }
1257 void set_age_mark(Address mark) { age_mark_ = mark; }
1258
1259 // True if the address is in the address range of this semispace (not
1260 // necessarily below the allocation pointer).
1261 bool Contains(Address a) {
1262 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1263 == reinterpret_cast<uintptr_t>(start_);
1264 }
1265
1266 // True if the object is a heap object in the address range of this
1267 // semispace (not necessarily below the allocation pointer).
1268 bool Contains(Object* o) {
1269 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1270 }
1271
1272 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001273 int SpaceOffsetForAddress(Address addr) {
1274 return static_cast<int>(addr - low());
1275 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001276
Leon Clarkee46be812010-01-19 14:06:41 +00001277 // If we don't have these here then SemiSpace will be abstract. However
1278 // they should never be called.
Steve Blocka7e24c12009-10-30 11:49:00 +00001279 virtual int Size() {
1280 UNREACHABLE();
1281 return 0;
1282 }
1283
Leon Clarkee46be812010-01-19 14:06:41 +00001284 virtual bool ReserveSpace(int bytes) {
1285 UNREACHABLE();
1286 return false;
1287 }
1288
Steve Blocka7e24c12009-10-30 11:49:00 +00001289 bool is_committed() { return committed_; }
1290 bool Commit();
1291 bool Uncommit();
1292
Steve Block6ded16b2010-05-10 14:33:55 +01001293#ifdef ENABLE_HEAP_PROTECTION
1294 // Protect/unprotect the space by marking it read-only/writable.
1295 virtual void Protect() {}
1296 virtual void Unprotect() {}
1297#endif
1298
Steve Blocka7e24c12009-10-30 11:49:00 +00001299#ifdef DEBUG
1300 virtual void Print();
1301 virtual void Verify();
1302#endif
1303
1304 // Returns the current capacity of the semispace.
1305 int Capacity() { return capacity_; }
1306
1307 // Returns the maximum capacity of the semispace.
1308 int MaximumCapacity() { return maximum_capacity_; }
1309
1310 // Returns the initial capacity of the semispace.
1311 int InitialCapacity() { return initial_capacity_; }
1312
1313 private:
1314 // The current and maximum capacity of the space.
1315 int capacity_;
1316 int maximum_capacity_;
1317 int initial_capacity_;
1318
1319 // The start address of the space.
1320 Address start_;
1321 // Used to govern object promotion during mark-compact collection.
1322 Address age_mark_;
1323
1324 // Masks and comparison values to test for containment in this semispace.
1325 uintptr_t address_mask_;
1326 uintptr_t object_mask_;
1327 uintptr_t object_expected_;
1328
1329 bool committed_;
1330
1331 public:
1332 TRACK_MEMORY("SemiSpace")
1333};
1334
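// A minimal sketch (hypothetical helper, not part of V8) of the containment
// test that SemiSpace::Contains performs above: the semispace's reserved range
// is aligned so that masking any address inside it with address_mask_ yields
// the start address of the space.
inline bool ExampleSemiSpaceContains(uintptr_t address,
                                     uintptr_t address_mask,
                                     uintptr_t space_start) {
  // The high bits identify the space; the low bits are the offset within it.
  return (address & address_mask) == space_start;
}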
1335
1336// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1337// semispace of the heap's new space. It iterates over the objects in the
1338// semispace from a given start address (defaulting to the bottom of the
1339// semispace) to the top of the semispace. New objects allocated after the
1340// iterator is created are not iterated.
1341class SemiSpaceIterator : public ObjectIterator {
1342 public:
1343 // Create an iterator over the objects in the given space. If no start
1344 // address is given, the iterator starts from the bottom of the space. If
1345 // no size function is given, the iterator calls Object::Size().
1346 explicit SemiSpaceIterator(NewSpace* space);
1347 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1348 SemiSpaceIterator(NewSpace* space, Address start);
1349
Steve Blocka7e24c12009-10-30 11:49:00 +00001350 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001351 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001352
1353 HeapObject* object = HeapObject::FromAddress(current_);
1354 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1355
1356 current_ += size;
1357 return object;
1358 }
1359
1360 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001361 virtual HeapObject* next_object() { return next(); }
1362
1363 private:
1364 void Initialize(NewSpace* space, Address start, Address end,
1365 HeapObjectCallback size_func);
1366
1367 // The semispace.
1368 SemiSpace* space_;
1369 // The current iteration point.
1370 Address current_;
1371 // The end of iteration.
1372 Address limit_;
1373 // The callback function.
1374 HeapObjectCallback size_func_;
1375};
1376
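// Usage sketch (hypothetical helper, not part of V8) showing how the iterator
// declared above is typically driven: next() returns NULL once the allocation
// top that existed when the iterator was created has been reached.
inline int ExampleCountNewSpaceObjects(NewSpace* space) {
  SemiSpaceIterator it(space);  // Starts at the bottom of the active semispace.
  int count = 0;
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    count++;  // Each object in the active semispace is visited exactly once.
  }
  return count;
}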
1377
1378// -----------------------------------------------------------------------------
1379// The young generation space.
1380//
1381// The new space consists of a contiguous pair of semispaces. It simply
1382// forwards most functions to the appropriate semispace.
1383
1384class NewSpace : public Space {
1385 public:
1386 // Constructor.
1387 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1388
1389 // Sets up the new space using the given chunk.
1390 bool Setup(Address start, int size);
1391
1392 // Tears down the space. Heap memory was not allocated by the space, so it
1393 // is not deallocated here.
1394 void TearDown();
1395
1396 // True if the space has been set up but not torn down.
1397 bool HasBeenSetup() {
1398 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1399 }
1400
1401 // Flip the pair of spaces.
1402 void Flip();
1403
1404 // Grow the capacity of the semispaces. Assumes that they are not at
1405 // their maximum capacity.
1406 void Grow();
1407
1408 // Shrink the capacity of the semispaces.
1409 void Shrink();
1410
1411 // True if the address or object lies in the address range of either
1412 // semispace (not necessarily below the allocation pointer).
1413 bool Contains(Address a) {
1414 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1415 == reinterpret_cast<uintptr_t>(start_);
1416 }
1417 bool Contains(Object* o) {
1418 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1419 }
1420
1421 // Return the allocated bytes in the active semispace.
Steve Blockd0582a62009-12-15 09:54:21 +00001422 virtual int Size() { return static_cast<int>(top() - bottom()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001423
Steve Blocka7e24c12009-10-30 11:49:00 +00001424 // Return the current capacity of a semispace.
1425 int Capacity() {
1426 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1427 return to_space_.Capacity();
1428 }
Steve Block3ce2e202009-11-05 08:53:23 +00001429
1430 // Return the total amount of memory committed for new space.
1431 int CommittedMemory() {
1432 if (from_space_.is_committed()) return 2 * Capacity();
1433 return Capacity();
1434 }
1435
Steve Blocka7e24c12009-10-30 11:49:00 +00001436 // Return the available bytes without growing in the active semispace.
1437 int Available() { return Capacity() - Size(); }
1438
1439 // Return the maximum capacity of a semispace.
1440 int MaximumCapacity() {
1441 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1442 return to_space_.MaximumCapacity();
1443 }
1444
1445 // Returns the initial capacity of a semispace.
1446 int InitialCapacity() {
1447 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1448 return to_space_.InitialCapacity();
1449 }
1450
1451 // Return the address of the allocation pointer in the active semispace.
1452 Address top() { return allocation_info_.top; }
1453 // Return the address of the first object in the active semispace.
1454 Address bottom() { return to_space_.low(); }
1455
1456 // Get the age mark of the inactive semispace.
1457 Address age_mark() { return from_space_.age_mark(); }
1458 // Set the age mark in the active semispace.
1459 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1460
1461 // The start address of the space and a bit mask. ANDing an address in the
1462 // new space with the mask will result in the start address.
1463 Address start() { return start_; }
1464 uintptr_t mask() { return address_mask_; }
1465
1466 // The allocation top and limit addresses.
1467 Address* allocation_top_address() { return &allocation_info_.top; }
1468 Address* allocation_limit_address() { return &allocation_info_.limit; }
1469
1470 Object* AllocateRaw(int size_in_bytes) {
1471 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1472 }
1473
1474 // Allocate the requested number of bytes for relocation during mark-compact
1475 // collection.
1476 Object* MCAllocateRaw(int size_in_bytes) {
1477 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1478 }
1479
1480 // Reset the allocation pointer to the beginning of the active semispace.
1481 void ResetAllocationInfo();
1482 // Reset the relocation pointer to the bottom of the inactive semispace in
1483 // preparation for mark-compact collection.
1484 void MCResetRelocationInfo();
1485 // Update the allocation pointer in the active semispace after a
1486 // mark-compact collection.
1487 void MCCommitRelocationInfo();
1488
1489 // Get the extent of the inactive semispace (for use as a marking stack).
1490 Address FromSpaceLow() { return from_space_.low(); }
1491 Address FromSpaceHigh() { return from_space_.high(); }
1492
1493 // Get the extent of the active semispace (to sweep newly copied objects
1494 // during a scavenge collection).
1495 Address ToSpaceLow() { return to_space_.low(); }
1496 Address ToSpaceHigh() { return to_space_.high(); }
1497
1498 // Offsets from the beginning of the semispaces.
1499 int ToSpaceOffsetForAddress(Address a) {
1500 return to_space_.SpaceOffsetForAddress(a);
1501 }
1502 int FromSpaceOffsetForAddress(Address a) {
1503 return from_space_.SpaceOffsetForAddress(a);
1504 }
1505
1506 // True if the object is a heap object in the address range of the
1507 // respective semispace (not necessarily below the allocation pointer of the
1508 // semispace).
1509 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1510 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1511
1512 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1513 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1514
Leon Clarkee46be812010-01-19 14:06:41 +00001515 virtual bool ReserveSpace(int bytes);
1516
Steve Blocka7e24c12009-10-30 11:49:00 +00001517#ifdef ENABLE_HEAP_PROTECTION
1518 // Protect/unprotect the space by marking it read-only/writable.
1519 virtual void Protect();
1520 virtual void Unprotect();
1521#endif
1522
1523#ifdef DEBUG
1524 // Verify the active semispace.
1525 virtual void Verify();
1526 // Print the active semispace.
1527 virtual void Print() { to_space_.Print(); }
1528#endif
1529
1530#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1531 // Iterates the active semispace to collect statistics.
1532 void CollectStatistics();
1533 // Reports previously collected statistics of the active semispace.
1534 void ReportStatistics();
1535 // Clears previously collected statistics.
1536 void ClearHistograms();
1537
1538 // Record the allocation or promotion of a heap object. Note that we don't
1539 // record every single allocation, but only those that happen in the
1540 // to space during a scavenge GC.
1541 void RecordAllocation(HeapObject* obj);
1542 void RecordPromotion(HeapObject* obj);
1543#endif
1544
1545 // Return whether the operation succeeded.
1546 bool CommitFromSpaceIfNeeded() {
1547 if (from_space_.is_committed()) return true;
1548 return from_space_.Commit();
1549 }
1550
1551 bool UncommitFromSpace() {
1552 if (!from_space_.is_committed()) return true;
1553 return from_space_.Uncommit();
1554 }
1555
1556 private:
1557 // The semispaces.
1558 SemiSpace to_space_;
1559 SemiSpace from_space_;
1560
1561 // Start address and bit mask for containment testing.
1562 Address start_;
1563 uintptr_t address_mask_;
1564 uintptr_t object_mask_;
1565 uintptr_t object_expected_;
1566
1567 // Allocation pointer and limit for normal allocation and allocation during
1568 // mark-compact collection.
1569 AllocationInfo allocation_info_;
1570 AllocationInfo mc_forwarding_info_;
1571
1572#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1573 HistogramInfo* allocated_histogram_;
1574 HistogramInfo* promoted_histogram_;
1575#endif
1576
1577 // Implementation of AllocateRaw and MCAllocateRaw.
1578 inline Object* AllocateRawInternal(int size_in_bytes,
1579 AllocationInfo* alloc_info);
1580
1581 friend class SemiSpaceIterator;
1582
1583 public:
1584 TRACK_MEMORY("NewSpace")
1585};
1586
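// A minimal sketch (under stated assumptions, not the actual implementation of
// AllocateRawInternal) of the bump-pointer allocation new space performs: the
// top pointer of an AllocationInfo is advanced by the requested size, and the
// allocation fails once it would cross the limit, at which point the caller
// must grow the space or collect garbage.
inline Address ExampleBumpAllocate(AllocationInfo* info, int size_in_bytes) {
  Address result = info->top;
  if (info->top + size_in_bytes > info->limit) {
    return NULL;  // Out of linear allocation space; the caller handles failure.
  }
  info->top += size_in_bytes;  // Bump the allocation pointer past the object.
  return result;
}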
1587
1588// -----------------------------------------------------------------------------
1589// Free lists for old object spaces
1590//
1591// Free-list nodes are free blocks in the heap. They look like heap objects
1592// (free-list node pointers have the heap object tag, and they have a map like
1593// a heap object). They have a size and a next pointer. The next pointer is
1594// the raw address of the next free list node (or NULL).
1595class FreeListNode: public HeapObject {
1596 public:
1597 // Obtain a free-list node from a raw address. This is not a cast because
1598 // it does not check nor require that the first word at the address is a map
1599 // pointer.
1600 static FreeListNode* FromAddress(Address address) {
1601 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1602 }
1603
Steve Block3ce2e202009-11-05 08:53:23 +00001604 static inline bool IsFreeListNode(HeapObject* object);
1605
Steve Blocka7e24c12009-10-30 11:49:00 +00001606 // Set the size in bytes, which can be read with HeapObject::Size(). This
1607 // function also writes a map to the first word of the block so that it
1608 // looks like a heap object to the garbage collector and heap iteration
1609 // functions.
1610 void set_size(int size_in_bytes);
1611
1612 // Accessors for the next field.
1613 inline Address next();
1614 inline void set_next(Address next);
1615
1616 private:
1617 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1618
1619 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1620};
1621
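// Usage sketch (hypothetical helper, not part of V8) of how a freed block is
// turned into a free-list node with the accessors declared above: the block is
// given a size (which also writes a map, so the GC and heap iterators see a
// valid heap object) and is linked to the rest of the list by a raw address.
inline void ExampleMakeFreeListNode(Address start,
                                    int size_in_bytes,
                                    Address next_node) {
  FreeListNode* node = FreeListNode::FromAddress(start);
  node->set_size(size_in_bytes);  // Readable later via HeapObject::Size().
  node->set_next(next_node);      // Raw address of the next free block or NULL.
}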
1622
1623// The free list for the old space.
1624class OldSpaceFreeList BASE_EMBEDDED {
1625 public:
1626 explicit OldSpaceFreeList(AllocationSpace owner);
1627
1628 // Clear the free list.
1629 void Reset();
1630
1631 // Return the number of bytes available on the free list.
1632 int available() { return available_; }
1633
1634 // Place a node on the free list. The block of size 'size_in_bytes'
1635 // starting at 'start' is placed on the free list. The return value is the
1636 // number of bytes that have been lost due to internal fragmentation by
1637 // freeing the block. Bookkeeping information will be written to the block,
1638 // ie, its contents will be destroyed. The start address should be word
1639 // i.e., its contents will be destroyed. The start address should be word
1640 int Free(Address start, int size_in_bytes);
1641
1642 // Allocate a block of size 'size_in_bytes' from the free list. The block
1643 // is uninitialized. A failure is returned if no block is available. The
1644 // number of bytes lost to fragmentation is returned in the output parameter
1645 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1646 Object* Allocate(int size_in_bytes, int* wasted_bytes);
1647
1648 private:
1649 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1650 // will always result in waste.)
1651 static const int kMinBlockSize = 2 * kPointerSize;
1652 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1653
1654 // The identity of the owning space, for building allocation Failure
1655 // objects.
1656 AllocationSpace owner_;
1657
1658 // Total available bytes in all blocks on this free list.
1659 int available_;
1660
1661 // Blocks are put on exact free lists in an array, indexed by size in words.
1662 // The available sizes are kept in an increasingly ordered list. Entries
1663 // corresponding to sizes < kMinBlockSize always have an empty free list
1664 // (but index kHead is used for the head of the size list).
1665 struct SizeNode {
1666 // Address of the head FreeListNode of the implied block size or NULL.
1667 Address head_node_;
1668 // Size (words) of the next larger available size if head_node_ != NULL.
1669 int next_size_;
1670 };
1671 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1672 SizeNode free_[kFreeListsLength];
1673
1674 // Sentinel elements for the size list. Real elements are in (kHead, kEnd).
1675 static const int kHead = kMinBlockSize / kPointerSize - 1;
1676 static const int kEnd = kMaxInt;
1677
1678 // We keep a "finger" in the size list to speed up a common pattern:
1679 // repeated requests for the same or increasing sizes.
1680 int finger_;
1681
1682 // Starting from *prev, find and return the smallest size >= index (words),
1683 // or kEnd. Update *prev to be the largest size < index, or kHead.
1684 int FindSize(int index, int* prev) {
1685 int cur = free_[*prev].next_size_;
1686 while (cur < index) {
1687 *prev = cur;
1688 cur = free_[cur].next_size_;
1689 }
1690 return cur;
1691 }
1692
1693 // Remove an existing element from the size list.
1694 void RemoveSize(int index) {
1695 int prev = kHead;
1696 int cur = FindSize(index, &prev);
1697 ASSERT(cur == index);
1698 free_[prev].next_size_ = free_[cur].next_size_;
1699 finger_ = prev;
1700 }
1701
1702 // Insert a new element into the size list.
1703 void InsertSize(int index) {
1704 int prev = kHead;
1705 int cur = FindSize(index, &prev);
1706 ASSERT(cur != index);
1707 free_[prev].next_size_ = index;
1708 free_[index].next_size_ = cur;
1709 }
1710
1711 // The size list is not updated during a sequence of calls to Free, but is
1712 // rebuilt before the next allocation.
1713 void RebuildSizeList();
1714 bool needs_rebuild_;
1715
1716#ifdef DEBUG
1717 // Does this free list contain a free block located at the address of 'node'?
1718 bool Contains(FreeListNode* node);
1719#endif
1720
1721 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1722};
1723
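// Illustrative sketch (hypothetical helper, not part of V8) of a free/allocate
// round trip on the free list declared above. Blocks are kept on exact lists
// indexed by their size in words, so a freed block of size_in_bytes lands at
// index size_in_bytes / kPointerSize.
inline Object* ExampleFreeThenAllocate(OldSpaceFreeList* free_list,
                                       Address block,
                                       int size_in_bytes,
                                       int* total_waste) {
  // Free() returns the bytes lost to internal fragmentation when the block is
  // put on (or is too small for) the free list.
  *total_waste = free_list->Free(block, size_in_bytes);
  int wasted_on_allocation = 0;
  // Allocate() returns a Failure object when no suitable block is available.
  Object* result = free_list->Allocate(size_in_bytes, &wasted_on_allocation);
  *total_waste += wasted_on_allocation;
  return result;
}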
1724
1725// The free list for the map space.
1726class FixedSizeFreeList BASE_EMBEDDED {
1727 public:
1728 FixedSizeFreeList(AllocationSpace owner, int object_size);
1729
1730 // Clear the free list.
1731 void Reset();
1732
1733 // Return the number of bytes available on the free list.
1734 int available() { return available_; }
1735
1736 // Place a node on the free list. The block starting at 'start' (assumed to
1737 // have size object_size_) is placed on the free list. Bookkeeping
1738 // information will be written to the block, ie, its contents will be
1739 // information will be written to the block, i.e., its contents will be
1740 void Free(Address start);
1741
1742 // Allocate a fixed-sized block from the free list. The block is
1743 // uninitialized. A failure is returned if no block is available.
1744 Object* Allocate();
1745
1746 private:
1747 // Available bytes on the free list.
1748 int available_;
1749
1750 // The head of the free list.
1751 Address head_;
1752
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001753 // The tail of the free list.
1754 Address tail_;
1755
Steve Blocka7e24c12009-10-30 11:49:00 +00001756 // The identity of the owning space, for building allocation Failure
1757 // objects.
1758 AllocationSpace owner_;
1759
1760 // The size of the objects in this space.
1761 int object_size_;
1762
1763 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1764};
1765
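// Usage sketch (hypothetical helper, not part of V8) of the fixed-size free
// list declared above: every block has the same size (object_size_), so Free()
// takes only the block's start address and Allocate() needs no size or waste
// bookkeeping; a Failure is returned when the list is empty.
inline Object* ExampleRecycleFixedBlock(FixedSizeFreeList* free_list,
                                        Address block) {
  free_list->Free(block);        // Destroys the block's previous contents.
  return free_list->Allocate();  // Hands back a block of object_size_ bytes.
}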
1766
1767// -----------------------------------------------------------------------------
1768// Old object space (excluding map objects)
1769
1770class OldSpace : public PagedSpace {
1771 public:
1772 // Creates an old space object with a given maximum capacity.
1773 // The constructor does not allocate pages from the OS.
1774 explicit OldSpace(int max_capacity,
1775 AllocationSpace id,
1776 Executability executable)
1777 : PagedSpace(max_capacity, id, executable), free_list_(id) {
1778 page_extra_ = 0;
1779 }
1780
1781 // The bytes available on the free list (ie, not above the linear allocation
1782 // pointer).
1783 int AvailableFree() { return free_list_.available(); }
1784
Steve Block6ded16b2010-05-10 14:33:55 +01001785 // The limit of allocation for a page in this space.
1786 virtual Address PageAllocationLimit(Page* page) {
1787 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001788 }
1789
1790 // Give a block of memory to the space's free list. It might be added to
1791 // the free list or accounted as waste.
Steve Block6ded16b2010-05-10 14:33:55 +01001792 // If add_to_freelist is false, only the accounting stats are updated and
1793 // no attempt is made to add the area to the free list.
1794 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001795 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001796
1797 if (add_to_freelist) {
1798 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1799 accounting_stats_.WasteBytes(wasted_bytes);
1800 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001801 }
1802
1803 // Prepare for full garbage collection. Resets the relocation pointer and
1804 // clears the free list.
1805 virtual void PrepareForMarkCompact(bool will_compact);
1806
1807 // Updates the allocation pointer to the relocation top after a mark-compact
1808 // collection.
1809 virtual void MCCommitRelocationInfo();
1810
Leon Clarkee46be812010-01-19 14:06:41 +00001811 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1812
Steve Blocka7e24c12009-10-30 11:49:00 +00001813#ifdef DEBUG
1814 // Reports statistics for the space
1815 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001816#endif
1817
1818 protected:
1819 // Virtual function in the superclass. Slow path of AllocateRaw.
1820 HeapObject* SlowAllocateRaw(int size_in_bytes);
1821
1822 // Virtual function in the superclass. Allocate linearly at the start of
1823 // the page after current_page (there is assumed to be one).
1824 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1825
1826 private:
1827 // The space's free list.
1828 OldSpaceFreeList free_list_;
1829
1830 public:
1831 TRACK_MEMORY("OldSpace")
1832};
1833
1834
1835// -----------------------------------------------------------------------------
1836// Old space for objects of a fixed size
1837
1838class FixedSpace : public PagedSpace {
1839 public:
1840 FixedSpace(int max_capacity,
1841 AllocationSpace id,
1842 int object_size_in_bytes,
1843 const char* name)
1844 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1845 object_size_in_bytes_(object_size_in_bytes),
1846 name_(name),
1847 free_list_(id, object_size_in_bytes) {
1848 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1849 }
1850
Steve Block6ded16b2010-05-10 14:33:55 +01001851 // The limit of allocation for a page in this space.
1852 virtual Address PageAllocationLimit(Page* page) {
1853 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001854 }
1855
1856 int object_size_in_bytes() { return object_size_in_bytes_; }
1857
1858 // Give a fixed sized block of memory to the space's free list.
Steve Block6ded16b2010-05-10 14:33:55 +01001859 // If add_to_freelist is false, only the accounting stats are updated and
1860 // no attempt is made to add the area to the free list.
1861 void Free(Address start, bool add_to_freelist) {
1862 if (add_to_freelist) {
1863 free_list_.Free(start);
1864 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001865 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1866 }
1867
1868 // Prepares for a mark-compact GC.
1869 virtual void PrepareForMarkCompact(bool will_compact);
1870
1871 // Updates the allocation pointer to the relocation top after a mark-compact
1872 // collection.
1873 virtual void MCCommitRelocationInfo();
1874
Leon Clarkee46be812010-01-19 14:06:41 +00001875 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1876
Steve Blocka7e24c12009-10-30 11:49:00 +00001877#ifdef DEBUG
1878 // Reports statistic info of the space
1879 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001880#endif
1881
1882 protected:
1883 // Virtual function in the superclass. Slow path of AllocateRaw.
1884 HeapObject* SlowAllocateRaw(int size_in_bytes);
1885
1886 // Virtual function in the superclass. Allocate linearly at the start of
1887 // the page after current_page (there is assumed to be one).
1888 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1889
Leon Clarkee46be812010-01-19 14:06:41 +00001890 void ResetFreeList() {
1891 free_list_.Reset();
1892 }
1893
Steve Blocka7e24c12009-10-30 11:49:00 +00001894 private:
1895 // The size of objects in this space.
1896 int object_size_in_bytes_;
1897
1898 // The name of this space.
1899 const char* name_;
1900
1901 // The space's free list.
1902 FixedSizeFreeList free_list_;
1903};
1904
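// Worked example (with hypothetical numbers) of the page_extra_ computation in
// the FixedSpace constructor above: the tail of a page's object area that is
// not a whole multiple of the object size can never hold an object, so it is
// subtracted from the page allocation limit. For instance, an 8000-byte object
// area filled with 88-byte objects leaves 8000 % 88 == 80 unusable bytes.
inline int ExamplePageExtra(int object_area_size, int object_size_in_bytes) {
  return object_area_size % object_size_in_bytes;  // Unusable tail of a page.
}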
1905
1906// -----------------------------------------------------------------------------
1907// Old space for all map objects
1908
1909class MapSpace : public FixedSpace {
1910 public:
1911 // Creates a map space object with a maximum capacity.
Leon Clarked91b9f72010-01-27 17:25:45 +00001912 MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
1913 : FixedSpace(max_capacity, id, Map::kSize, "map"),
1914 max_map_space_pages_(max_map_space_pages) {
1915 ASSERT(max_map_space_pages < kMaxMapPageIndex);
1916 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001917
1918 // Prepares for a mark-compact GC.
1919 virtual void PrepareForMarkCompact(bool will_compact);
1920
1921 // Given an index, returns the page address.
1922 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1923
Leon Clarked91b9f72010-01-27 17:25:45 +00001924 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00001925
Leon Clarkee46be812010-01-19 14:06:41 +00001926 // Are map pointers encodable into map word?
1927 bool MapPointersEncodable() {
1928 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00001929 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00001930 return true;
1931 }
Leon Clarked91b9f72010-01-27 17:25:45 +00001932 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00001933 }
1934
1935 // Should be called after forced sweep to find out if map space needs
1936 // compaction.
1937 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00001938 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00001939 }
1940
1941 Address TopAfterCompaction(int live_maps) {
1942 ASSERT(NeedsCompaction(live_maps));
1943
1944 int pages_left = live_maps / kMapsPerPage;
1945 PageIterator it(this, PageIterator::ALL_PAGES);
1946 while (pages_left-- > 0) {
1947 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001948 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00001949 }
1950 ASSERT(it.has_next());
1951 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001952 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00001953 ASSERT(top_page->is_valid());
1954
1955 int offset = (live_maps % kMapsPerPage) * Map::kSize;
1956 Address top = top_page->ObjectAreaStart() + offset;
1957 ASSERT(top < top_page->ObjectAreaEnd());
1958 ASSERT(Contains(top));
1959
1960 return top;
1961 }
1962
1963 void FinishCompaction(Address new_top, int live_maps) {
1964 Page* top_page = Page::FromAddress(new_top);
1965 ASSERT(top_page->is_valid());
1966
1967 SetAllocationInfo(&allocation_info_, top_page);
1968 allocation_info_.top = new_top;
1969
1970 int new_size = live_maps * Map::kSize;
1971 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
1972 accounting_stats_.AllocateBytes(new_size);
1973
1974#ifdef DEBUG
1975 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00001976 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00001977 for (Page* p = first_page_; p != top_page; p = p->next_page())
1978 actual_size += kMapsPerPage * Map::kSize;
1979 actual_size += (new_top - top_page->ObjectAreaStart());
1980 ASSERT(accounting_stats_.Size() == actual_size);
1981 }
1982#endif
1983
1984 Shrink();
1985 ResetFreeList();
1986 }
1987
Steve Blocka7e24c12009-10-30 11:49:00 +00001988 protected:
1989#ifdef DEBUG
1990 virtual void VerifyObject(HeapObject* obj);
1991#endif
1992
1993 private:
Leon Clarkee46be812010-01-19 14:06:41 +00001994 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
1995
1996 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00001997 int CompactionThreshold() {
1998 return kMapsPerPage * (max_map_space_pages_ - 1);
1999 }
2000
2001 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002002
Steve Blocka7e24c12009-10-30 11:49:00 +00002003 // An array of page start address in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002004 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002005
2006 public:
2007 TRACK_MEMORY("MapSpace")
2008};
2009
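// Illustrative arithmetic (hypothetical helper and numbers, not part of V8)
// for TopAfterCompaction above: live maps first fill whole pages, and the new
// allocation top lies after the remaining maps on the following page. With,
// say, 100 maps per page and 250 live maps, two pages stay completely full and
// the top page holds 50 maps, so the top is ObjectAreaStart() + 50 * Map::kSize.
inline int ExampleTopPageOffset(int live_maps, int maps_per_page, int map_size) {
  return (live_maps % maps_per_page) * map_size;  // Offset of top on last page.
}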
2010
2011// -----------------------------------------------------------------------------
2012// Old space for all global object property cell objects
2013
2014class CellSpace : public FixedSpace {
2015 public:
2016 // Creates a property cell space object with a maximum capacity.
2017 CellSpace(int max_capacity, AllocationSpace id)
2018 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2019
2020 protected:
2021#ifdef DEBUG
2022 virtual void VerifyObject(HeapObject* obj);
2023#endif
2024
2025 public:
2026 TRACK_MEMORY("CellSpace")
2027};
2028
2029
2030// -----------------------------------------------------------------------------
2031// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2032// the large object space. A large object is allocated from the OS heap with
2033// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2034// A large object always starts at Page::kObjectStartOffset within its page.
2035// Large objects do not move during garbage collections.
2036
2037// A LargeObjectChunk holds exactly one large object page with exactly one
2038// large object.
2039class LargeObjectChunk {
2040 public:
2041 // Allocates a new LargeObjectChunk that contains a large object page
2042 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002043 // object) bytes after the object area start of that page.
2044 // The allocated chunk size is set in the output parameter chunk_size.
Steve Blocka7e24c12009-10-30 11:49:00 +00002045 static LargeObjectChunk* New(int size_in_bytes,
2046 size_t* chunk_size,
2047 Executability executable);
2048
2049 // Interpret a raw address as a large object chunk.
2050 static LargeObjectChunk* FromAddress(Address address) {
2051 return reinterpret_cast<LargeObjectChunk*>(address);
2052 }
2053
2054 // Returns the address of this chunk.
2055 Address address() { return reinterpret_cast<Address>(this); }
2056
2057 // Accessors for the fields of the chunk.
2058 LargeObjectChunk* next() { return next_; }
2059 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2060
2061 size_t size() { return size_; }
2062 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
2063
2064 // Returns the object in this chunk.
2065 inline HeapObject* GetObject();
2066
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002067 // Given a requested size, returns the physical size of a chunk to be
2068 // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002069 static int ChunkSizeFor(int size_in_bytes);
2070
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002071 // Given a chunk size, returns the object size it can accommodate. Used by
2072 // LargeObjectSpace::Available.
Steve Blocka7e24c12009-10-30 11:49:00 +00002073 static int ObjectSizeFor(int chunk_size) {
2074 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2075 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2076 }
2077
2078 private:
2079 // A pointer to the next large object chunk in the space or NULL.
2080 LargeObjectChunk* next_;
2081
2082 // The size of this chunk.
2083 size_t size_;
2084
2085 public:
2086 TRACK_MEMORY("LargeObjectChunk")
2087};
2088
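// Worked example (with hypothetical byte counts) of ObjectSizeFor above: the
// usable object size is the chunk size minus one page of padding minus the
// object start offset. If Page::kPageSize were 8192 and Page::kObjectStartOffset
// were 256, a 24576-byte chunk could hold an object of at most
// 24576 - 8192 - 256 = 16128 bytes, and any chunk of 8448 bytes or less holds
// no object at all.
inline int ExampleLargeObjectPayload(int chunk_size) {
  return LargeObjectChunk::ObjectSizeFor(chunk_size);  // 0 for tiny chunks.
}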
2089
2090class LargeObjectSpace : public Space {
2091 public:
2092 explicit LargeObjectSpace(AllocationSpace id);
2093 virtual ~LargeObjectSpace() {}
2094
2095 // Initializes internal data structures.
2096 bool Setup();
2097
2098 // Releases internal resources, frees objects in this space.
2099 void TearDown();
2100
2101 // Allocates a (non-FixedArray, non-Code) large object.
2102 Object* AllocateRaw(int size_in_bytes);
2103 // Allocates a large Code object.
2104 Object* AllocateRawCode(int size_in_bytes);
2105 // Allocates a large FixedArray.
2106 Object* AllocateRawFixedArray(int size_in_bytes);
2107
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002108 // Available bytes for objects in this space.
Steve Blocka7e24c12009-10-30 11:49:00 +00002109 int Available() {
2110 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2111 }
2112
2113 virtual int Size() {
2114 return size_;
2115 }
2116
2117 int PageCount() {
2118 return page_count_;
2119 }
2120
2121 // Finds an object for a given address, returns Failure::Exception()
2122 // if it is not found. The function iterates through all objects in this
2123 // space, so it may be slow.
2124 Object* FindObject(Address a);
2125
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002126 // Iterates objects covered by dirty regions.
2127 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002128
2129 // Frees unmarked objects.
2130 void FreeUnmarkedObjects();
2131
2132 // Checks whether a heap object is in this space; O(1).
2133 bool Contains(HeapObject* obj);
2134
2135 // Checks whether the space is empty.
2136 bool IsEmpty() { return first_chunk_ == NULL; }
2137
Leon Clarkee46be812010-01-19 14:06:41 +00002138 // See the comments for ReserveSpace in the Space class. This has to be
2139 // called after ReserveSpace has been called on the paged spaces, since they
2140 // may use some memory, leaving less for large objects.
2141 virtual bool ReserveSpace(int bytes);
2142
Steve Blocka7e24c12009-10-30 11:49:00 +00002143#ifdef ENABLE_HEAP_PROTECTION
2144 // Protect/unprotect the space by marking it read-only/writable.
2145 void Protect();
2146 void Unprotect();
2147#endif
2148
2149#ifdef DEBUG
2150 virtual void Verify();
2151 virtual void Print();
2152 void ReportStatistics();
2153 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002154#endif
2155 // Checks whether an address is in the object area in this space. It
2156 // iterates over all objects in the space and may be slow.
2157 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2158
2159 private:
2160 // The head of the linked list of large object chunks.
2161 LargeObjectChunk* first_chunk_;
2162 int size_; // allocated bytes
2163 int page_count_; // number of chunks
2164
2165
2166 // Shared implementation of AllocateRaw, AllocateRawCode and
2167 // AllocateRawFixedArray.
2168 Object* AllocateRawInternal(int requested_size,
2169 int object_size,
2170 Executability executable);
2171
Steve Blocka7e24c12009-10-30 11:49:00 +00002172 friend class LargeObjectIterator;
2173
2174 public:
2175 TRACK_MEMORY("LargeObjectSpace")
2176};
2177
2178
2179class LargeObjectIterator: public ObjectIterator {
2180 public:
2181 explicit LargeObjectIterator(LargeObjectSpace* space);
2182 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2183
Steve Blocka7e24c12009-10-30 11:49:00 +00002184 HeapObject* next();
2185
2186 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002187 virtual HeapObject* next_object() { return next(); }
2188
2189 private:
2190 LargeObjectChunk* current_;
2191 HeapObjectCallback size_func_;
2192};
2193
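// Usage sketch (hypothetical helper, not part of V8) of the iterator above:
// summing the sizes of all objects in the large object space, assuming next()
// returns NULL once the chunk list is exhausted, as with the other iterators
// in this file.
inline int ExampleLargeObjectBytes(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  int bytes = 0;
  for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
    bytes += object->Size();  // Exactly one large object per chunk.
  }
  return bytes;
}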
2194
2195} } // namespace v8::internal
2196
2197#endif // V8_SPACES_H_