// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that holds the
// address of the next page and its ownership information. The second word may
// hold the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card-marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// bytes. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bit encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if the page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every
// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
// be a pointer to a live heap object in new space. Thus objects in old
// pointer and large object spaces should have a special layout (e.g. no bare
// integer fields). This requirement does not apply to map space, which is
// iterated in a special fashion. However, we still require pointer fields of
// dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer under the watermark is considered to
// be well formed. The page allocation watermark is not necessarily equal to
// the page allocation top, but all live objects on the page should reside
// under the allocation watermark. During a scavenge the allocation watermark
// might be bumped and invalid pointers might appear below it. To avoid
// following them we store a valid watermark into a special field in the page
// header and set the page's WATERMARK_INVALIDATED flag. For details see the
// comments in the Page::SetAllocationWatermark() method body.
//
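// An illustrative sketch (not part of the interface; 'slot' and 'value' are
// hypothetical names) of how a store of a new-space pointer into an old-space
// object is recorded with the region dirty marks declared below:
//
//   Address slot = ...;             // field inside an old-space object
//   Object* value = ...;            // object just stored into that field
//   if (Heap::InNewSpace(value)) {
//     Page* page = Page::FromAddress(slot);
//     page->MarkRegionDirty(slot);  // set the dirty bit for slot's region
//   }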

// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                         \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                            \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                               \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                             \
  ASSERT((Page::kObjectStartOffset <= offset)                                  \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                           \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page is normally 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of Page::kPageHeaderSize bytes which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }
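
  // An illustrative example (hypothetical addresses, assuming an 8K page):
  // if a page starts at 0x2000, an allocation top of exactly 0x4000 (the
  // exclusive upper bound of that page) still maps back to the page at
  // 0x2000, because kPointerSize is subtracted before the alignment mask is
  // applied:
  //
  //   Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(0x4000));
  //   // p->address() == reinterpret_cast<Address>(0x2000)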

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Returns the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

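  // An illustrative, simplified sketch (hypothetical variable names) of how
  // an address maps to a region dirty mark on a normal page. The full
  // encoding is documented in the Page::GetRegionNumberForAddress() method
  // body and also covers large object pages, where several regions may share
  // a single dirty mark.
  //
  //   int offset = page->Offset(addr);          // 0 .. kPageSize
  //   int region = offset >> kRegionSizeLog2;   // one of 32 regions per page
  //   uint32_t mask = 1 << region;              // bit within dirty_regions_
  //   bool dirty = (page->GetRegionMarks() & mask) != 0;
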
  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
      kIntSize + kPointerSize;

  // The start offset of the object area in a page.
  static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // The page allocation watermark was bumped by preallocation during
    // scavenge. The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we simply invalidate the watermark on each old space page after
  // processing it. Then, at the beginning of the next scavenge, we flip the
  // meaning of the WATERMARK_INVALIDATED flag, and each page becomes marked
  // as having a valid watermark again.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  //   if a page is in use, then it is marked as having an invalid watermark
  //   at the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag's meaning at the
  // beginning of a scavenge, all pages in use will be marked as having a
  // valid watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag();

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

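  // An illustrative sketch (not necessarily the actual implementation, which
  // lives outside this header) of the flag-flipping idea described above:
  // instead of clearing WATERMARK_INVALIDATED on every page, the bit is
  // compared against a global value that is flipped once per scavenge.
  //
  //   bool Page::IsWatermarkValid() {
  //     return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
  //            watermark_invalidated_mark_;
  //   }
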
  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits  = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
      kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);

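  // An illustrative sketch (hypothetical variable names) of how the flags_
  // word below is shared between the page flags and the allocation watermark
  // offset, using the constants defined above:
  //
  //   uint32_t offset =
  //       (static_cast<uint32_t>(flags_) & kAllocationWatermarkOffsetMask) >>
  //       kAllocationWatermarkOffsetShift;            // watermark offset
  //   uint32_t page_flags = static_cast<uint32_t>(flags_) & kFlagsMask;
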
  // This field contains the meaning of the WATERMARK_INVALIDATED flag.
  // Instead of clearing this flag from all pages we just flip
  // its meaning at the beginning of a scavenge.
  static intptr_t watermark_invalidated_mark_;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and
  // the offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding address
  // of the first live object in this page.
  // During scavenge collections this field is used to store the allocation
  // watermark if it is altered during the scavenge.
  Address mc_first_forwarded;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual int Size() = 0;

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
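//
// An illustrative usage sketch (hypothetical variable names; the calls are
// the static members declared below): the range is reserved once at heap
// setup and blocks are later carved out of it for large code objects.
//
//   if (!CodeRange::Setup(requested_size)) return false;  // reserve only
//   size_t allocated = 0;
//   void* base = CodeRange::AllocateRawMemory(block_size, &allocated);
//   ...
//   CodeRange::FreeRawMemory(base, allocated);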
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// is expanded by allocating kPagesPerChunk pages per expansion, except
// possibly the last expansion before running out of address space. The first
// chunk may contain fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//

class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commits a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommits a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk order (see the comment for the class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by the given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf,
                            size_t length,
                            Executability executable);

  // Returns the maximum available bytes of heaps.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  static int Size() { return size_; }

  // Returns allocated executable spaces in bytes.
  static int SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by a space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can have at most 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
#else
  static const int kPagesPerChunk = 16;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;
  // Allocated executable space size in bytes.
  static int size_executable_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own
// next() method, which avoids virtual function calls when iterating a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.
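//
// An illustrative usage sketch (assuming a PagedSpace* such as the one
// returned by Heap::old_pointer_space(); iteration stops when next()
// returns NULL):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // process obj
//   }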

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next, goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.
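//
// An illustrative usage sketch (the same loop structure is used by
// CountPagesToTop() later in this file):
//
//   PageIterator it(space, PageIterator::ALL_PAGES);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // process p
//   }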

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via the
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (ie, not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (eg, small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
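//
// A worked example (hypothetical numbers): a space with a capacity of 16384
// bytes that currently has 9000 allocated bytes and 200 wasted bytes has
// 16384 - 9000 - 200 = 7184 available bytes; every balanced update below
// preserves capacity == size + waste + available.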
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (ie, no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (ie, available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  int Capacity() { return capacity_; }
  int Available() { return available_; }
  int Size() { return size_; }
  int Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;
  int available_;
  int size_;
  int waste_;
};


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  int CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  inline Object* MCAllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Used by ReserveSpace.
  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;

  // Free all pages in range from prev (exclusive) to last (inclusive).
  // Freed pages are moved to the end of the page list.
  void FreePages(Page* prev, Page* last);

  // Deallocates a block.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) = 0;

  // Set space allocation info.
  void SetTop(Address top) {
    allocation_info_.top = top;
    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
  }

  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->
        SetAllocationWatermark(mc_forwarding_info_.top);
  }

  // Computes the offset of a given address in this space to the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of the unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  void RelinkPageListInChunkOrder(bool deallocate_blocks);

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space. Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // True if pages owned by this space are linked in chunk order.
  // See the comment for class MemoryAllocator for the definition of
  // chunk order.
  bool page_list_is_chunk_ordered_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated. Possibly non-zero
  // for pages in spaces with only fixed-size objects. Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets the allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  int CountPagesToTop() {
    Page* p = Page::FromAllocationTop(allocation_info_.top);
    PageIterator it(this, PageIterator::ALL_PAGES);
    int counter = 1;
    while (it.has_next()) {
      if (it.next() == p) return counter;
      counter++;
    }
    UNREACHABLE();
    return -1;
  }

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'. Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);
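
  // An illustrative sketch (not necessarily the actual implementation, which
  // lives outside this header) of the linear fast path: bump the top pointer
  // if the object fits below the limit, otherwise fail so that the caller
  // falls back to the slow path.
  //
  //   HeapObject* AllocateLinearly(AllocationInfo* alloc_info, int size) {
  //     Address new_top = alloc_info->top + size;
  //     if (new_top > alloc_info->limit) return NULL;
  //     HeapObject* obj = HeapObject::FromAddress(alloc_info->top);
  //     alloc_info->top = new_top;
  //     return obj;
  //   }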

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there. This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw. This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();
#endif

 private:

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

  friend class PageIterator;
};


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};
#endif


1232// -----------------------------------------------------------------------------
1233// SemiSpace in young generation
1234//
1235// A semispace is a contiguous chunk of memory. The mark-compact collector
1236// uses the memory in the from space as a marking stack when tracing live
1237// objects.
1238
1239class SemiSpace : public Space {
1240 public:
1241 // Constructor.
1242 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1243 start_ = NULL;
1244 age_mark_ = NULL;
1245 }
1246
1247 // Sets up the semispace using the given chunk.
1248 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1249
1250 // Tear down the space. Heap memory was not allocated by the space, so it
1251 // is not deallocated here.
1252 void TearDown();
1253
1254 // True if the space has been set up but not torn down.
1255 bool HasBeenSetup() { return start_ != NULL; }
1256
1257 // Grow the size of the semispace by committing extra virtual memory.
1258 // Assumes that the caller has checked that the semispace has not reached
1259 // its maximum capacity (and thus there is space available in the reserved
1260 // address range to grow).
1261 bool Grow();
1262
1263 // Grow the semispace to the new capacity. The new capacity
1264 // requested must be larger than the current capacity.
1265 bool GrowTo(int new_capacity);
1266
1267 // Shrinks the semispace to the new capacity. The new capacity
1268 // requested must be more than the amount of used memory in the
1269 // semispace and less than the current capacity.
1270 bool ShrinkTo(int new_capacity);
1271
1272 // Returns the start address of the space.
1273 Address low() { return start_; }
1274 // Returns one past the end address of the space.
1275 Address high() { return low() + capacity_; }
1276
1277 // Age mark accessors.
1278 Address age_mark() { return age_mark_; }
1279 void set_age_mark(Address mark) { age_mark_ = mark; }
1280
1281 // True if the address is in the address range of this semispace (not
1282 // necessarily below the allocation pointer).
1283 bool Contains(Address a) {
1284 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1285 == reinterpret_cast<uintptr_t>(start_);
1286 }
1287
1288 // True if the object is a heap object in the address range of this
1289 // semispace (not necessarily below the allocation pointer).
1290 bool Contains(Object* o) {
1291 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1292 }
1293
1294 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001295 int SpaceOffsetForAddress(Address addr) {
1296 return static_cast<int>(addr - low());
1297 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001298
  // If we don't define these here, SemiSpace will be abstract. However,
  // they should never be called.
Steve Blocka7e24c12009-10-30 11:49:00 +00001301 virtual int Size() {
1302 UNREACHABLE();
1303 return 0;
1304 }
1305
Leon Clarkee46be812010-01-19 14:06:41 +00001306 virtual bool ReserveSpace(int bytes) {
1307 UNREACHABLE();
1308 return false;
1309 }
1310
Steve Blocka7e24c12009-10-30 11:49:00 +00001311 bool is_committed() { return committed_; }
1312 bool Commit();
1313 bool Uncommit();
1314
Steve Block6ded16b2010-05-10 14:33:55 +01001315#ifdef ENABLE_HEAP_PROTECTION
1316 // Protect/unprotect the space by marking it read-only/writable.
1317 virtual void Protect() {}
1318 virtual void Unprotect() {}
1319#endif
1320
Steve Blocka7e24c12009-10-30 11:49:00 +00001321#ifdef DEBUG
1322 virtual void Print();
1323 virtual void Verify();
1324#endif
1325
  // Returns the current capacity of the semispace.
1327 int Capacity() { return capacity_; }
1328
  // Returns the maximum capacity of the semispace.
1330 int MaximumCapacity() { return maximum_capacity_; }
1331
  // Returns the initial capacity of the semispace.
1333 int InitialCapacity() { return initial_capacity_; }
1334
1335 private:
1336 // The current and maximum capacity of the space.
1337 int capacity_;
1338 int maximum_capacity_;
1339 int initial_capacity_;
1340
1341 // The start address of the space.
1342 Address start_;
1343 // Used to govern object promotion during mark-compact collection.
1344 Address age_mark_;
1345
1346 // Masks and comparison values to test for containment in this semispace.
1347 uintptr_t address_mask_;
1348 uintptr_t object_mask_;
1349 uintptr_t object_expected_;
1350
1351 bool committed_;
1352
1353 public:
1354 TRACK_MEMORY("SemiSpace")
1355};
1356
1357
1358// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1359// semispace of the heap's new space. It iterates over the objects in the
1360// semispace from a given start address (defaulting to the bottom of the
1361// semispace) to the top of the semispace. New objects allocated after the
1362// iterator is created are not iterated.
1363class SemiSpaceIterator : public ObjectIterator {
1364 public:
1365 // Create an iterator over the objects in the given space. If no start
1366 // address is given, the iterator starts from the bottom of the space. If
1367 // no size function is given, the iterator calls Object::Size().
1368 explicit SemiSpaceIterator(NewSpace* space);
1369 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1370 SemiSpaceIterator(NewSpace* space, Address start);
1371
Steve Blocka7e24c12009-10-30 11:49:00 +00001372 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001373 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001374
1375 HeapObject* object = HeapObject::FromAddress(current_);
1376 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1377
1378 current_ += size;
1379 return object;
1380 }
1381
1382 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001383 virtual HeapObject* next_object() { return next(); }
1384
1385 private:
1386 void Initialize(NewSpace* space, Address start, Address end,
1387 HeapObjectCallback size_func);
1388
1389 // The semispace.
1390 SemiSpace* space_;
1391 // The current iteration point.
1392 Address current_;
1393 // The end of iteration.
1394 Address limit_;
1395 // The callback function.
1396 HeapObjectCallback size_func_;
1397};
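
// A minimal iteration sketch (illustrative only; assumes the heap has been
// set up):
//
//   SemiSpaceIterator it(Heap::new_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }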
1398
1399
1400// -----------------------------------------------------------------------------
1401// The young generation space.
1402//
1403// The new space consists of a contiguous pair of semispaces. It simply
1404// forwards most functions to the appropriate semispace.
1405
1406class NewSpace : public Space {
1407 public:
1408 // Constructor.
1409 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1410
1411 // Sets up the new space using the given chunk.
1412 bool Setup(Address start, int size);
1413
1414 // Tears down the space. Heap memory was not allocated by the space, so it
1415 // is not deallocated here.
1416 void TearDown();
1417
1418 // True if the space has been set up but not torn down.
1419 bool HasBeenSetup() {
1420 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1421 }
1422
1423 // Flip the pair of spaces.
1424 void Flip();
1425
1426 // Grow the capacity of the semispaces. Assumes that they are not at
1427 // their maximum capacity.
1428 void Grow();
1429
1430 // Shrink the capacity of the semispaces.
1431 void Shrink();
1432
1433 // True if the address or object lies in the address range of either
1434 // semispace (not necessarily below the allocation pointer).
1435 bool Contains(Address a) {
1436 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1437 == reinterpret_cast<uintptr_t>(start_);
1438 }
1439 bool Contains(Object* o) {
1440 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1441 }
1442
1443 // Return the allocated bytes in the active semispace.
Steve Blockd0582a62009-12-15 09:54:21 +00001444 virtual int Size() { return static_cast<int>(top() - bottom()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001445
Steve Blocka7e24c12009-10-30 11:49:00 +00001446 // Return the current capacity of a semispace.
1447 int Capacity() {
1448 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1449 return to_space_.Capacity();
1450 }
Steve Block3ce2e202009-11-05 08:53:23 +00001451
1452 // Return the total amount of memory committed for new space.
1453 int CommittedMemory() {
1454 if (from_space_.is_committed()) return 2 * Capacity();
1455 return Capacity();
1456 }
1457
  // Return the available bytes in the active semispace, without growing it.
1459 int Available() { return Capacity() - Size(); }
1460
1461 // Return the maximum capacity of a semispace.
1462 int MaximumCapacity() {
1463 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1464 return to_space_.MaximumCapacity();
1465 }
1466
1467 // Returns the initial capacity of a semispace.
1468 int InitialCapacity() {
1469 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1470 return to_space_.InitialCapacity();
1471 }
1472
1473 // Return the address of the allocation pointer in the active semispace.
1474 Address top() { return allocation_info_.top; }
1475 // Return the address of the first object in the active semispace.
1476 Address bottom() { return to_space_.low(); }
1477
1478 // Get the age mark of the inactive semispace.
1479 Address age_mark() { return from_space_.age_mark(); }
1480 // Set the age mark in the active semispace.
1481 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1482
  // The start address of the space and a bit mask. ANDing an address in the
1484 // new space with the mask will result in the start address.
1485 Address start() { return start_; }
1486 uintptr_t mask() { return address_mask_; }
1487
1488 // The allocation top and limit addresses.
1489 Address* allocation_top_address() { return &allocation_info_.top; }
1490 Address* allocation_limit_address() { return &allocation_info_.limit; }
1491
1492 Object* AllocateRaw(int size_in_bytes) {
1493 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1494 }
1495
1496 // Allocate the requested number of bytes for relocation during mark-compact
1497 // collection.
1498 Object* MCAllocateRaw(int size_in_bytes) {
1499 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1500 }
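
  // Both allocation functions above are bump-pointer allocations over an
  // AllocationInfo. A sketch of the shared fast path (illustrative only;
  // the details live in AllocateRawInternal):
  //
  //   Address new_top = alloc_info->top + size_in_bytes;
  //   if (new_top > alloc_info->limit) {
  //     return Failure::RetryAfterGC(size_in_bytes, NEW_SPACE);
  //   }
  //   Object* obj = HeapObject::FromAddress(alloc_info->top);
  //   alloc_info->top = new_top;
  //   return obj;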
1501
1502 // Reset the allocation pointer to the beginning of the active semispace.
1503 void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
1505 // preparation for mark-compact collection.
1506 void MCResetRelocationInfo();
1507 // Update the allocation pointer in the active semispace after a
1508 // mark-compact collection.
1509 void MCCommitRelocationInfo();
1510
1511 // Get the extent of the inactive semispace (for use as a marking stack).
1512 Address FromSpaceLow() { return from_space_.low(); }
1513 Address FromSpaceHigh() { return from_space_.high(); }
1514
1515 // Get the extent of the active semispace (to sweep newly copied objects
1516 // during a scavenge collection).
1517 Address ToSpaceLow() { return to_space_.low(); }
1518 Address ToSpaceHigh() { return to_space_.high(); }
1519
1520 // Offsets from the beginning of the semispaces.
1521 int ToSpaceOffsetForAddress(Address a) {
1522 return to_space_.SpaceOffsetForAddress(a);
1523 }
1524 int FromSpaceOffsetForAddress(Address a) {
1525 return from_space_.SpaceOffsetForAddress(a);
1526 }
1527
1528 // True if the object is a heap object in the address range of the
1529 // respective semispace (not necessarily below the allocation pointer of the
1530 // semispace).
1531 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1532 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1533
1534 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1535 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1536
Leon Clarkee46be812010-01-19 14:06:41 +00001537 virtual bool ReserveSpace(int bytes);
1538
Steve Blocka7e24c12009-10-30 11:49:00 +00001539#ifdef ENABLE_HEAP_PROTECTION
1540 // Protect/unprotect the space by marking it read-only/writable.
1541 virtual void Protect();
1542 virtual void Unprotect();
1543#endif
1544
1545#ifdef DEBUG
1546 // Verify the active semispace.
1547 virtual void Verify();
1548 // Print the active semispace.
1549 virtual void Print() { to_space_.Print(); }
1550#endif
1551
1552#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1553 // Iterates the active semispace to collect statistics.
1554 void CollectStatistics();
1555 // Reports previously collected statistics of the active semispace.
1556 void ReportStatistics();
1557 // Clears previously collected statistics.
1558 void ClearHistograms();
1559
1560 // Record the allocation or promotion of a heap object. Note that we don't
1561 // record every single allocation, but only those that happen in the
1562 // to space during a scavenge GC.
1563 void RecordAllocation(HeapObject* obj);
1564 void RecordPromotion(HeapObject* obj);
1565#endif
1566
  // Return whether the operation succeeded.
1568 bool CommitFromSpaceIfNeeded() {
1569 if (from_space_.is_committed()) return true;
1570 return from_space_.Commit();
1571 }
1572
1573 bool UncommitFromSpace() {
1574 if (!from_space_.is_committed()) return true;
1575 return from_space_.Uncommit();
1576 }
1577
1578 private:
1579 // The semispaces.
1580 SemiSpace to_space_;
1581 SemiSpace from_space_;
1582
1583 // Start address and bit mask for containment testing.
1584 Address start_;
1585 uintptr_t address_mask_;
1586 uintptr_t object_mask_;
1587 uintptr_t object_expected_;
1588
1589 // Allocation pointer and limit for normal allocation and allocation during
1590 // mark-compact collection.
1591 AllocationInfo allocation_info_;
1592 AllocationInfo mc_forwarding_info_;
1593
1594#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1595 HistogramInfo* allocated_histogram_;
1596 HistogramInfo* promoted_histogram_;
1597#endif
1598
1599 // Implementation of AllocateRaw and MCAllocateRaw.
1600 inline Object* AllocateRawInternal(int size_in_bytes,
1601 AllocationInfo* alloc_info);
1602
1603 friend class SemiSpaceIterator;
1604
1605 public:
1606 TRACK_MEMORY("NewSpace")
1607};
1608
1609
1610// -----------------------------------------------------------------------------
1611// Free lists for old object spaces
1612//
1613// Free-list nodes are free blocks in the heap. They look like heap objects
1614// (free-list node pointers have the heap object tag, and they have a map like
1615// a heap object). They have a size and a next pointer. The next pointer is
1616// the raw address of the next free list node (or NULL).
1617class FreeListNode: public HeapObject {
1618 public:
1619 // Obtain a free-list node from a raw address. This is not a cast because
1620 // it does not check nor require that the first word at the address is a map
1621 // pointer.
1622 static FreeListNode* FromAddress(Address address) {
1623 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1624 }
1625
Steve Block3ce2e202009-11-05 08:53:23 +00001626 static inline bool IsFreeListNode(HeapObject* object);
1627
Steve Blocka7e24c12009-10-30 11:49:00 +00001628 // Set the size in bytes, which can be read with HeapObject::Size(). This
1629 // function also writes a map to the first word of the block so that it
1630 // looks like a heap object to the garbage collector and heap iteration
1631 // functions.
1632 void set_size(int size_in_bytes);
1633
1634 // Accessors for the next field.
1635 inline Address next();
1636 inline void set_next(Address next);
1637
1638 private:
1639 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1640
1641 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1642};
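
// A sketch of how a freed block becomes a free-list node (illustrative only;
// 'start', 'size_in_bytes' and 'old_head' are hypothetical locals of the
// owning free list):
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(size_in_bytes);  // write a map so GC can skip the block
//   node->set_next(old_head);       // raw address of the next free block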
1643
1644
1645// The free list for the old space.
1646class OldSpaceFreeList BASE_EMBEDDED {
1647 public:
1648 explicit OldSpaceFreeList(AllocationSpace owner);
1649
1650 // Clear the free list.
1651 void Reset();
1652
1653 // Return the number of bytes available on the free list.
1654 int available() { return available_; }
1655
1656 // Place a node on the free list. The block of size 'size_in_bytes'
1657 // starting at 'start' is placed on the free list. The return value is the
1658 // number of bytes that have been lost due to internal fragmentation by
1659 // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
1661 // aligned, and the size should be a non-zero multiple of the word size.
1662 int Free(Address start, int size_in_bytes);
1663
1664 // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
1666 // number of bytes lost to fragmentation is returned in the output parameter
1667 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1668 Object* Allocate(int size_in_bytes, int* wasted_bytes);
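
  // A minimal usage sketch (illustrative only; the owner, sizes and
  // 'block_start' are hypothetical):
  //
  //   OldSpaceFreeList free_list(OLD_POINTER_SPACE);
  //   free_list.Free(block_start, 64);   // return a 64-byte block
  //   int wasted_bytes;
  //   Object* result = free_list.Allocate(48, &wasted_bytes);
  //   if (result->IsFailure()) { /* no block large enough; GC and retry */ }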
1669
1670 private:
1671 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1672 // will always result in waste.)
1673 static const int kMinBlockSize = 2 * kPointerSize;
1674 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1675
1676 // The identity of the owning space, for building allocation Failure
1677 // objects.
1678 AllocationSpace owner_;
1679
1680 // Total available bytes in all blocks on this free list.
1681 int available_;
1682
1683 // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in a list sorted in increasing order. Entries
1685 // corresponding to sizes < kMinBlockSize always have an empty free list
1686 // (but index kHead is used for the head of the size list).
1687 struct SizeNode {
1688 // Address of the head FreeListNode of the implied block size or NULL.
1689 Address head_node_;
1690 // Size (words) of the next larger available size if head_node_ != NULL.
1691 int next_size_;
1692 };
1693 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1694 SizeNode free_[kFreeListsLength];
1695
1696 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1697 static const int kHead = kMinBlockSize / kPointerSize - 1;
1698 static const int kEnd = kMaxInt;
1699
1700 // We keep a "finger" in the size list to speed up a common pattern:
1701 // repeated requests for the same or increasing sizes.
1702 int finger_;
1703
1704 // Starting from *prev, find and return the smallest size >= index (words),
1705 // or kEnd. Update *prev to be the largest size < index, or kHead.
1706 int FindSize(int index, int* prev) {
1707 int cur = free_[*prev].next_size_;
1708 while (cur < index) {
1709 *prev = cur;
1710 cur = free_[cur].next_size_;
1711 }
1712 return cur;
1713 }
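
  // Worked example (illustrative): if only the 2-word and 5-word exact lists
  // are non-empty, the size list is kHead -> 2 -> 5 -> kEnd, and
  // FindSize(3, &prev) returns 5 with *prev == 2, so a 3-word request is
  // carved out of a 5-word block.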
1714
1715 // Remove an existing element from the size list.
1716 void RemoveSize(int index) {
1717 int prev = kHead;
1718 int cur = FindSize(index, &prev);
1719 ASSERT(cur == index);
1720 free_[prev].next_size_ = free_[cur].next_size_;
1721 finger_ = prev;
1722 }
1723
1724 // Insert a new element into the size list.
1725 void InsertSize(int index) {
1726 int prev = kHead;
1727 int cur = FindSize(index, &prev);
1728 ASSERT(cur != index);
1729 free_[prev].next_size_ = index;
1730 free_[index].next_size_ = cur;
1731 }
1732
1733 // The size list is not updated during a sequence of calls to Free, but is
1734 // rebuilt before the next allocation.
1735 void RebuildSizeList();
1736 bool needs_rebuild_;
1737
1738#ifdef DEBUG
1739 // Does this free list contain a free block located at the address of 'node'?
1740 bool Contains(FreeListNode* node);
1741#endif
1742
1743 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1744};
1745
1746
1747// The free list for the map space.
1748class FixedSizeFreeList BASE_EMBEDDED {
1749 public:
1750 FixedSizeFreeList(AllocationSpace owner, int object_size);
1751
1752 // Clear the free list.
1753 void Reset();
1754
1755 // Return the number of bytes available on the free list.
1756 int available() { return available_; }
1757
1758 // Place a node on the free list. The block starting at 'start' (assumed to
1759 // have size object_size_) is placed on the free list. Bookkeeping
  // information will be written to the block, i.e., its contents will be
1761 // destroyed. The start address should be word aligned.
1762 void Free(Address start);
1763
  // Allocate a fixed-size block from the free list. The block is uninitialized.
1765 // A failure is returned if no block is available.
1766 Object* Allocate();
1767
1768 private:
1769 // Available bytes on the free list.
1770 int available_;
1771
1772 // The head of the free list.
1773 Address head_;
1774
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001775 // The tail of the free list.
1776 Address tail_;
1777
Steve Blocka7e24c12009-10-30 11:49:00 +00001778 // The identity of the owning space, for building allocation Failure
1779 // objects.
1780 AllocationSpace owner_;
1781
1782 // The size of the objects in this space.
1783 int object_size_;
1784
1785 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1786};
1787
1788
1789// -----------------------------------------------------------------------------
1790// Old object space (excluding map objects)
1791
1792class OldSpace : public PagedSpace {
1793 public:
1794 // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
1796 explicit OldSpace(int max_capacity,
1797 AllocationSpace id,
1798 Executability executable)
1799 : PagedSpace(max_capacity, id, executable), free_list_(id) {
1800 page_extra_ = 0;
1801 }
1802
  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
1805 int AvailableFree() { return free_list_.available(); }
1806
Steve Block6ded16b2010-05-10 14:33:55 +01001807 // The limit of allocation for a page in this space.
1808 virtual Address PageAllocationLimit(Page* page) {
1809 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001810 }
1811
1812 // Give a block of memory to the space's free list. It might be added to
1813 // the free list or accounted as waste.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the area to the free list.
1816 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001817 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001818
1819 if (add_to_freelist) {
1820 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1821 accounting_stats_.WasteBytes(wasted_bytes);
1822 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001823 }
1824
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001825 virtual void DeallocateBlock(Address start,
1826 int size_in_bytes,
1827 bool add_to_freelist);
1828
Steve Blocka7e24c12009-10-30 11:49:00 +00001829 // Prepare for full garbage collection. Resets the relocation pointer and
1830 // clears the free list.
1831 virtual void PrepareForMarkCompact(bool will_compact);
1832
1833 // Updates the allocation pointer to the relocation top after a mark-compact
1834 // collection.
1835 virtual void MCCommitRelocationInfo();
1836
Leon Clarkee46be812010-01-19 14:06:41 +00001837 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1838
Steve Blocka7e24c12009-10-30 11:49:00 +00001839#ifdef DEBUG
1840 // Reports statistics for the space
1841 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001842#endif
1843
1844 protected:
1845 // Virtual function in the superclass. Slow path of AllocateRaw.
1846 HeapObject* SlowAllocateRaw(int size_in_bytes);
1847
1848 // Virtual function in the superclass. Allocate linearly at the start of
1849 // the page after current_page (there is assumed to be one).
1850 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1851
1852 private:
1853 // The space's free list.
1854 OldSpaceFreeList free_list_;
1855
1856 public:
1857 TRACK_MEMORY("OldSpace")
1858};
1859
1860
1861// -----------------------------------------------------------------------------
1862// Old space for objects of a fixed size
1863
1864class FixedSpace : public PagedSpace {
1865 public:
1866 FixedSpace(int max_capacity,
1867 AllocationSpace id,
1868 int object_size_in_bytes,
1869 const char* name)
1870 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1871 object_size_in_bytes_(object_size_in_bytes),
1872 name_(name),
1873 free_list_(id, object_size_in_bytes) {
1874 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1875 }
1876
Steve Block6ded16b2010-05-10 14:33:55 +01001877 // The limit of allocation for a page in this space.
1878 virtual Address PageAllocationLimit(Page* page) {
1879 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 }
1881
1882 int object_size_in_bytes() { return object_size_in_bytes_; }
1883
  // Give a fixed-sized block of memory to the space's free list.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the area to the free list.
1887 void Free(Address start, bool add_to_freelist) {
1888 if (add_to_freelist) {
1889 free_list_.Free(start);
1890 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001891 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1892 }
1893
1894 // Prepares for a mark-compact GC.
1895 virtual void PrepareForMarkCompact(bool will_compact);
1896
1897 // Updates the allocation pointer to the relocation top after a mark-compact
1898 // collection.
1899 virtual void MCCommitRelocationInfo();
1900
Leon Clarkee46be812010-01-19 14:06:41 +00001901 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1902
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001903 virtual void DeallocateBlock(Address start,
1904 int size_in_bytes,
1905 bool add_to_freelist);
Steve Blocka7e24c12009-10-30 11:49:00 +00001906#ifdef DEBUG
1907 // Reports statistic info of the space
1908 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001909#endif
1910
1911 protected:
1912 // Virtual function in the superclass. Slow path of AllocateRaw.
1913 HeapObject* SlowAllocateRaw(int size_in_bytes);
1914
1915 // Virtual function in the superclass. Allocate linearly at the start of
1916 // the page after current_page (there is assumed to be one).
1917 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1918
Leon Clarkee46be812010-01-19 14:06:41 +00001919 void ResetFreeList() {
1920 free_list_.Reset();
1921 }
1922
Steve Blocka7e24c12009-10-30 11:49:00 +00001923 private:
1924 // The size of objects in this space.
1925 int object_size_in_bytes_;
1926
1927 // The name of this space.
1928 const char* name_;
1929
1930 // The space's free list.
1931 FixedSizeFreeList free_list_;
1932};
1933
1934
1935// -----------------------------------------------------------------------------
1936// Old space for all map objects
1937
1938class MapSpace : public FixedSpace {
1939 public:
1940 // Creates a map space object with a maximum capacity.
Leon Clarked91b9f72010-01-27 17:25:45 +00001941 MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
1942 : FixedSpace(max_capacity, id, Map::kSize, "map"),
1943 max_map_space_pages_(max_map_space_pages) {
1944 ASSERT(max_map_space_pages < kMaxMapPageIndex);
1945 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001946
1947 // Prepares for a mark-compact GC.
1948 virtual void PrepareForMarkCompact(bool will_compact);
1949
1950 // Given an index, returns the page address.
1951 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1952
Leon Clarked91b9f72010-01-27 17:25:45 +00001953 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00001954
  // Are map pointers encodable into the map word?
1956 bool MapPointersEncodable() {
1957 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00001958 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00001959 return true;
1960 }
Leon Clarked91b9f72010-01-27 17:25:45 +00001961 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00001962 }
1963
  // Should be called after a forced sweep to find out if the map space needs
1965 // compaction.
1966 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00001967 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00001968 }
1969
1970 Address TopAfterCompaction(int live_maps) {
1971 ASSERT(NeedsCompaction(live_maps));
1972
1973 int pages_left = live_maps / kMapsPerPage;
1974 PageIterator it(this, PageIterator::ALL_PAGES);
1975 while (pages_left-- > 0) {
1976 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001977 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00001978 }
1979 ASSERT(it.has_next());
1980 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001981 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00001982 ASSERT(top_page->is_valid());
1983
    int offset = (live_maps % kMapsPerPage) * Map::kSize;
1985 Address top = top_page->ObjectAreaStart() + offset;
1986 ASSERT(top < top_page->ObjectAreaEnd());
1987 ASSERT(Contains(top));
1988
1989 return top;
1990 }
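
  // Worked example (illustrative): with live_maps == 2 * kMapsPerPage + 3,
  // two full pages are skipped and the returned top points just past the
  // third map slot on the following page.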
1991
1992 void FinishCompaction(Address new_top, int live_maps) {
1993 Page* top_page = Page::FromAddress(new_top);
1994 ASSERT(top_page->is_valid());
1995
1996 SetAllocationInfo(&allocation_info_, top_page);
1997 allocation_info_.top = new_top;
1998
1999 int new_size = live_maps * Map::kSize;
2000 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2001 accounting_stats_.AllocateBytes(new_size);
2002
2003#ifdef DEBUG
2004 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002005 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00002006 for (Page* p = first_page_; p != top_page; p = p->next_page())
2007 actual_size += kMapsPerPage * Map::kSize;
2008 actual_size += (new_top - top_page->ObjectAreaStart());
2009 ASSERT(accounting_stats_.Size() == actual_size);
2010 }
2011#endif
2012
2013 Shrink();
2014 ResetFreeList();
2015 }
2016
Steve Blocka7e24c12009-10-30 11:49:00 +00002017 protected:
2018#ifdef DEBUG
2019 virtual void VerifyObject(HeapObject* obj);
2020#endif
2021
2022 private:
Leon Clarkee46be812010-01-19 14:06:41 +00002023 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2024
2025 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002026 int CompactionThreshold() {
2027 return kMapsPerPage * (max_map_space_pages_ - 1);
2028 }
2029
2030 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002031
  // An array of page start addresses in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002033 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002034
2035 public:
2036 TRACK_MEMORY("MapSpace")
2037};
2038
2039
2040// -----------------------------------------------------------------------------
2041// Old space for all global object property cell objects
2042
2043class CellSpace : public FixedSpace {
2044 public:
2045 // Creates a property cell space object with a maximum capacity.
2046 CellSpace(int max_capacity, AllocationSpace id)
2047 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
2048
2049 protected:
2050#ifdef DEBUG
2051 virtual void VerifyObject(HeapObject* obj);
2052#endif
2053
2054 public:
2055 TRACK_MEMORY("CellSpace")
2056};
2057
2058
2059// -----------------------------------------------------------------------------
2060// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within its
// page. Large objects do not move during garbage collections.
2065
2066// A LargeObjectChunk holds exactly one large object page with exactly one
2067// large object.
2068class LargeObjectChunk {
2069 public:
2070 // Allocates a new LargeObjectChunk that contains a large object page
2071 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002072 // object) bytes after the object area start of that page.
2073 // The allocated chunk size is set in the output parameter chunk_size.
Steve Blocka7e24c12009-10-30 11:49:00 +00002074 static LargeObjectChunk* New(int size_in_bytes,
2075 size_t* chunk_size,
2076 Executability executable);
2077
2078 // Interpret a raw address as a large object chunk.
2079 static LargeObjectChunk* FromAddress(Address address) {
2080 return reinterpret_cast<LargeObjectChunk*>(address);
2081 }
2082
2083 // Returns the address of this chunk.
2084 Address address() { return reinterpret_cast<Address>(this); }
2085
2086 // Accessors for the fields of the chunk.
2087 LargeObjectChunk* next() { return next_; }
2088 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
2089
Steve Block791712a2010-08-27 10:21:07 +01002090 size_t size() { return size_ & ~Page::kPageFlagMask; }
Steve Blocka7e24c12009-10-30 11:49:00 +00002091 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
2092
2093 // Returns the object in this chunk.
2094 inline HeapObject* GetObject();
2095
  // Given a requested size, returns the physical size of a chunk to be
2097 // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002098 static int ChunkSizeFor(int size_in_bytes);
2099
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002100 // Given a chunk size, returns the object size it can accommodate. Used by
2101 // LargeObjectSpace::Available.
Steve Blocka7e24c12009-10-30 11:49:00 +00002102 static int ObjectSizeFor(int chunk_size) {
2103 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2104 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2105 }
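
  // Worked example (illustrative): a chunk of 3 * Page::kPageSize bytes can
  // hold an object of 2 * Page::kPageSize - Page::kObjectStartOffset bytes;
  // a chunk no larger than one page plus the start offset holds no object.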
2106
2107 private:
2108 // A pointer to the next large object chunk in the space or NULL.
2109 LargeObjectChunk* next_;
2110
2111 // The size of this chunk.
2112 size_t size_;
2113
2114 public:
2115 TRACK_MEMORY("LargeObjectChunk")
2116};
2117
2118
2119class LargeObjectSpace : public Space {
2120 public:
2121 explicit LargeObjectSpace(AllocationSpace id);
2122 virtual ~LargeObjectSpace() {}
2123
2124 // Initializes internal data structures.
2125 bool Setup();
2126
  // Releases internal resources and frees the objects in this space.
2128 void TearDown();
2129
2130 // Allocates a (non-FixedArray, non-Code) large object.
2131 Object* AllocateRaw(int size_in_bytes);
2132 // Allocates a large Code object.
2133 Object* AllocateRawCode(int size_in_bytes);
2134 // Allocates a large FixedArray.
2135 Object* AllocateRawFixedArray(int size_in_bytes);
2136
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002137 // Available bytes for objects in this space.
Steve Blocka7e24c12009-10-30 11:49:00 +00002138 int Available() {
2139 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
2140 }
2141
2142 virtual int Size() {
2143 return size_;
2144 }
2145
2146 int PageCount() {
2147 return page_count_;
2148 }
2149
  // Finds an object for a given address; returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space and may be slow.
2153 Object* FindObject(Address a);
2154
  // Finds a large object page containing the given pc; returns NULL
2156 // if such a page doesn't exist.
2157 LargeObjectChunk* FindChunkContainingPc(Address pc);
2158
2159
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002160 // Iterates objects covered by dirty regions.
2161 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002162
2163 // Frees unmarked objects.
2164 void FreeUnmarkedObjects();
2165
2166 // Checks whether a heap object is in this space; O(1).
2167 bool Contains(HeapObject* obj);
2168
2169 // Checks whether the space is empty.
2170 bool IsEmpty() { return first_chunk_ == NULL; }
2171
Leon Clarkee46be812010-01-19 14:06:41 +00002172 // See the comments for ReserveSpace in the Space class. This has to be
2173 // called after ReserveSpace has been called on the paged spaces, since they
2174 // may use some memory, leaving less for large objects.
2175 virtual bool ReserveSpace(int bytes);
2176
Steve Blocka7e24c12009-10-30 11:49:00 +00002177#ifdef ENABLE_HEAP_PROTECTION
2178 // Protect/unprotect the space by marking it read-only/writable.
2179 void Protect();
2180 void Unprotect();
2181#endif
2182
2183#ifdef DEBUG
2184 virtual void Verify();
2185 virtual void Print();
2186 void ReportStatistics();
2187 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002188#endif
  // Checks whether an address is in the object area of this space. It
  // iterates over all objects in the space and may be slow.
2191 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2192
2193 private:
2194 // The head of the linked list of large object chunks.
2195 LargeObjectChunk* first_chunk_;
2196 int size_; // allocated bytes
2197 int page_count_; // number of chunks
2198
2199
2200 // Shared implementation of AllocateRaw, AllocateRawCode and
2201 // AllocateRawFixedArray.
2202 Object* AllocateRawInternal(int requested_size,
2203 int object_size,
2204 Executability executable);
2205
Steve Blocka7e24c12009-10-30 11:49:00 +00002206 friend class LargeObjectIterator;
2207
2208 public:
2209 TRACK_MEMORY("LargeObjectSpace")
2210};
2211
2212
2213class LargeObjectIterator: public ObjectIterator {
2214 public:
2215 explicit LargeObjectIterator(LargeObjectSpace* space);
2216 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2217
Steve Blocka7e24c12009-10-30 11:49:00 +00002218 HeapObject* next();
2219
  // Implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002221 virtual HeapObject* next_object() { return next(); }
2222
2223 private:
2224 LargeObjectChunk* current_;
2225 HeapObjectCallback size_func_;
2226};
2227
2228
2229} } // namespace v8::internal
2230
2231#endif // V8_SPACES_H_