// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header, a remembered
// set area, and an object area. The page size is deliberately chosen as 8K
// bytes. The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. The next 248 bytes are
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
// remembered set bit corresponds to a pointer in the object area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It reuses the first few words of the remembered set for
// bookkeeping relocation information.

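// A worked example of the arithmetic above (illustrative only): with an 8K
// (8192 byte) page and 4-byte aligned pointers there are 8192 / 4 = 2048
// potential pointer slots, so the remembered set needs 2048 bits = 256 bytes.
// The first 256 bytes of a page (header plus remembered set) need no coverage
// themselves, which frees 256 / 32 = 8 bytes, i.e. the two header words; the
// remaining 248 bytes of remembered set cover the object area, which
// therefore starts at offset 256.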


// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                          \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                        \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address)                                           \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                              \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                            \
  ASSERT((Page::kObjectStartOffset <= offset)                                 \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                          \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size. A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as the remembered set, and the rest of the page is the
// object area.
//
// Pointers are aligned to the pointer size (4 bytes), so only 1 bit is needed
// for a pointer in the remembered set. Given an address, its remembered set
// bit position (offset from the start of the page) is calculated by dividing
// its page offset by 32. Therefore, the object area in a page starts at the
// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
// the first two words (64 bits) in a page can be used for other purposes.
//
// On the 64-bit platform, we add an offset to the start of the remembered set,
// and pointers are aligned to the 8-byte pointer size. This means that we need
// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
// For this reason we add an offset to get room for the Page data at the start.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Returns the start address of the remembered set area.
  Address RSetStart() { return address() + kRSetStartOffset; }

  // Returns the end address of the remembered set area (exclusive).
  Address RSetEnd() { return address() + kRSetEndOffset; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // The result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset into this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Remembered set support

  // Clears the remembered set in this page.
  inline void ClearRSet();

  // Returns the address of the remembered set word corresponding to an
  // object address/offset pair, and the bit encoded as a single-bit
  // mask in the output parameter 'bitmask'.
  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
                                               uint32_t* bitmask));

  // Sets the corresponding remembered set bit for a given address.
  INLINE(static void SetRSet(Address address, int offset));

  // Clears the corresponding remembered set bit for a given address.
  static inline void UnsetRSet(Address address, int offset);

  // Checks whether the remembered set bit for a given address is set.
  static inline bool IsRSetSet(Address address, int offset);

#ifdef DEBUG
  // Use a state to mark whether remembered set space can be used for other
  // purposes.
  enum RSetState { IN_USE, NOT_IN_USE };
  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
  static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  // The offset of the remembered set in a page, in addition to the empty bytes
  // formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
#else
  static const int kRSetOffset = 0;
#endif
  // The end offset of the remembered set in a page
  // (heaps are aligned to pointer size).
  static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;

  // The start offset of the object area in a page.
  // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
  // to align the start of the rset to a uint32_t address.
  static const int kObjectStartOffset = 256;

  // The start offset of the used part of the remembered set in a page.
  static const int kRSetStartOffset = kRSetOffset +
      kObjectStartOffset / kBitsPerPointer;

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  enum PageFlag {
    IS_NORMAL_PAGE = 1 << 0,
    WAS_IN_USE_BEFORE_MC = 1 << 1
  };

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store various page flags.
  int flags;

  // The following fields may overlap with the remembered set; they can only
  // be used in the mark-compact collector when the remembered set is not
  // used.

  // The index of the page in its owner space.
  int mc_page_index;

  // The allocation pointer after relocating objects to this page.
  Address mc_relocation_top;

  // The forwarding address of the first live object in this page.
  Address mc_first_forwarded;

#ifdef DEBUG
 private:
  static RSetState rset_state_;  // state of the remembered set
#endif
};
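
// Illustrative sketch of how the accessors above compose (assumes 'slot' is a
// pointer-aligned address inside a normal page; not part of the Page API):
//   Page* p = Page::FromAddress(slot);   // page containing the slot
//   int offset = p->Offset(slot);        // offset of the slot in that page
//   Page::SetRSet(slot, 0);              // mark the slot in the remembered set
//   ASSERT(Page::IsRSetSet(slot, 0));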


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual int Size() = 0;

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space wasted
  // at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange : public AllStatic {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  static bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  static void TearDown();

  static bool exists() { return code_range_ != NULL; }
  static bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  static void* AllocateRawMemory(const size_t requested, size_t* allocated);
  static void FreeRawMemory(void* buf, size_t length);

 private:
  // The reserved range of virtual memory that all code objects are put in.
  static VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  static List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  static List<FreeBlock> allocation_list_;
  static int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  static void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);
};
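
// Illustrative sketch of the intended 64-bit call sequence (the sizes are
// placeholders for the example, not V8's real defaults):
//   if (CodeRange::Setup(512 * 1024 * 1024)) {
//     size_t allocated;
//     void* block = CodeRange::AllocateRawMemory(Page::kPageSize, &allocated);
//     ...
//     CodeRange::FreeRawMemory(block, allocated);
//   }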


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expands its space by allocating kPagesPerChunk pages at a time, except
// possibly the last expansion (before running out of space). The first chunk
// may contain fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in the chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//


class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful, leaving the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  static void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see the comment for this class).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Frees all pages owned by the given space.
  static void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf, size_t length);

  // Returns the maximum available bytes of heaps.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  static int Size() { return size_; }

  // Returns maximum available bytes that the old space can have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by the space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  static void RelinkPageListInChunkOrder(PagedSpace* space,
                                         Page** first_page,
                                         Page** last_page,
                                         Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
#else
  static const int kPagesPerChunk = 16;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);

  static Page* RelinkPagesInChunk(int chunk_id,
                                  Address chunk_start,
                                  size_t chunk_size,
                                  Page* prev,
                                  Page** last_page_in_use);
};
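
// Illustrative sketch of the startup sequence described above (error checks
// omitted; 'requested', 'committed_size' and 'owner' are placeholder names,
// not declarations from this header):
//   MemoryAllocator::Setup(max_capacity);
//   void* chunk = MemoryAllocator::ReserveInitialChunk(requested);
//   int num_pages;
//   Page* first = MemoryAllocator::CommitPages(
//       reinterpret_cast<Address>(chunk), committed_size, owner, &num_pages);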


// -----------------------------------------------------------------------------
// Interface for heap object iterators to be implemented by all object space
// object iterators.
//
// NOTE: The space specific object iterators also implement their own next()
// method, which is used to avoid the cost of virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;   // current iteration point
  Address end_addr_;   // end iteration point
  Address cur_limit_;  // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;     // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next(), advances iteration to the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
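
// Illustrative usage sketch (FromNextPage() is expected to return NULL once
// the end of the space is reached, so NULL terminates the walk):
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // visit obj
//   }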


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//                    mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};
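
// Illustrative usage sketch:
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // visit p
//   }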


// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  int Capacity() { return capacity_; }
  int Available() { return available_; }
  int Size() { return size_; }
  int Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;
  int available_;
  int size_;
  int waste_;
};
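
// Worked example of the balancing rule above (illustrative only): starting
// from Clear(), ExpandSpace(8192) leaves capacity == available == 8192;
// AllocateBytes(100) moves 100 bytes from available to size, and
// WasteBytes(32) moves 32 bytes from available to waste. After each step
// Capacity() == Size() + Waste() + Available() still holds
// (8192 == 100 + 32 + 8060).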


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, and uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, so the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  // Clears remembered sets of pages in this space.
  void ClearRSet();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if page is unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  int CommittedMemory() { return Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  inline Object* MCAllocateRaw(int size_in_bytes);

  virtual bool ReserveSpace(int bytes);

  // Used by ReserveSpace.
  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;

  // Free all pages in range from prev (exclusive) to last (inclusive).
  // Freed pages are moved to the end of the page list.
  void FreePages(Page* prev, Page* last);

  // Set space allocation info.
  void SetTop(Address top) {
    allocation_info_.top = top;
    allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
  }

  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }

  // Computes the offset of a given address in this space to the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space. Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // True if pages owned by this space are linked in chunk-order.
  // See the comment for class MemoryAllocator for the definition of
  // chunk-order.
  bool page_list_is_chunk_ordered_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated. Possibly non-zero
  // for pages in spaces with only fixed-size objects. Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  int CountPagesToTop() {
    Page* p = Page::FromAllocationTop(allocation_info_.top);
    PageIterator it(this, PageIterator::ALL_PAGES);
    int counter = 1;
    while (it.has_next()) {
      if (it.next() == p) return counter;
      counter++;
    }
    UNREACHABLE();
    return -1;
  }

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'. Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there. This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw. This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();

  void DoPrintRSet(const char* space_name);
#endif

 private:
  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

  friend class PageIterator;
};
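
// Illustrative allocation sketch (assumes the usual failure-object check via
// Object::IsFailure(), which is declared in objects.h, not here):
//   Object* result = space->AllocateRaw(size_in_bytes);
//   if (result->IsFailure()) {
//     // The space is full: the caller triggers a GC or propagates failure.
//   }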
1121
1122
1123#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1124class NumberAndSizeInfo BASE_EMBEDDED {
1125 public:
1126 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1127
1128 int number() const { return number_; }
1129 void increment_number(int num) { number_ += num; }
1130
1131 int bytes() const { return bytes_; }
1132 void increment_bytes(int size) { bytes_ += size; }
1133
1134 void clear() {
1135 number_ = 0;
1136 bytes_ = 0;
1137 }
1138
1139 private:
1140 int number_;
1141 int bytes_;
1142};
1143
1144
1145// HistogramInfo class for recording a single "bar" of a histogram. This
1146// class is used for collecting statistics to print to stdout (when compiled
1147// with DEBUG) or to the log file (when compiled with
1148// ENABLE_LOGGING_AND_PROFILING).
1149class HistogramInfo: public NumberAndSizeInfo {
1150 public:
1151 HistogramInfo() : NumberAndSizeInfo() {}
1152
1153 const char* name() { return name_; }
1154 void set_name(const char* name) { name_ = name; }
1155
1156 private:
1157 const char* name_;
1158};
1159#endif
1160
1161
1162// -----------------------------------------------------------------------------
1163// SemiSpace in young generation
1164//
1165// A semispace is a contiguous chunk of memory. The mark-compact collector
1166// uses the memory in the from space as a marking stack when tracing live
1167// objects.
1168
1169class SemiSpace : public Space {
1170 public:
1171 // Constructor.
1172 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1173 start_ = NULL;
1174 age_mark_ = NULL;
1175 }
1176
1177 // Sets up the semispace using the given chunk.
1178 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1179
1180 // Tear down the space. Heap memory was not allocated by the space, so it
1181 // is not deallocated here.
1182 void TearDown();
1183
1184 // True if the space has been set up but not torn down.
1185 bool HasBeenSetup() { return start_ != NULL; }
1186
1187 // Grow the size of the semispace by committing extra virtual memory.
1188 // Assumes that the caller has checked that the semispace has not reached
1189 // its maximum capacity (and thus there is space available in the reserved
1190 // address range to grow).
1191 bool Grow();
1192
1193 // Grow the semispace to the new capacity. The new capacity
1194 // requested must be larger than the current capacity.
1195 bool GrowTo(int new_capacity);
1196
1197 // Shrinks the semispace to the new capacity. The new capacity
1198 // requested must be more than the amount of used memory in the
1199 // semispace and less than the current capacity.
1200 bool ShrinkTo(int new_capacity);
1201
1202 // Returns the start address of the space.
1203 Address low() { return start_; }
1204 // Returns one past the end address of the space.
1205 Address high() { return low() + capacity_; }
1206
1207 // Age mark accessors.
1208 Address age_mark() { return age_mark_; }
1209 void set_age_mark(Address mark) { age_mark_ = mark; }
1210
1211 // True if the address is in the address range of this semispace (not
1212 // necessarily below the allocation pointer).
1213 bool Contains(Address a) {
1214 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1215 == reinterpret_cast<uintptr_t>(start_);
1216 }
1217
1218 // True if the object is a heap object in the address range of this
1219 // semispace (not necessarily below the allocation pointer).
1220 bool Contains(Object* o) {
1221 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1222 }
1223
1224 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001225 int SpaceOffsetForAddress(Address addr) {
1226 return static_cast<int>(addr - low());
1227 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001228
Leon Clarkee46be812010-01-19 14:06:41 +00001229 // If we don't have these here then SemiSpace will be abstract. However
1230 // they should never be called.
Steve Blocka7e24c12009-10-30 11:49:00 +00001231 virtual int Size() {
1232 UNREACHABLE();
1233 return 0;
1234 }
1235
Leon Clarkee46be812010-01-19 14:06:41 +00001236 virtual bool ReserveSpace(int bytes) {
1237 UNREACHABLE();
1238 return false;
1239 }
1240
Steve Blocka7e24c12009-10-30 11:49:00 +00001241 bool is_committed() { return committed_; }
1242 bool Commit();
1243 bool Uncommit();
1244
Steve Block6ded16b2010-05-10 14:33:55 +01001245#ifdef ENABLE_HEAP_PROTECTION
1246 // Protect/unprotect the space by marking it read-only/writable.
1247 virtual void Protect() {}
1248 virtual void Unprotect() {}
1249#endif
1250
Steve Blocka7e24c12009-10-30 11:49:00 +00001251#ifdef DEBUG
1252 virtual void Print();
1253 virtual void Verify();
1254#endif
1255
1256 // Returns the current capacity of the semi space.
1257 int Capacity() { return capacity_; }
1258
1259 // Returns the maximum capacity of the semi space.
1260 int MaximumCapacity() { return maximum_capacity_; }
1261
1262 // Returns the initial capacity of the semi space.
1263 int InitialCapacity() { return initial_capacity_; }
1264
1265 private:
1266 // The current and maximum capacity of the space.
1267 int capacity_;
1268 int maximum_capacity_;
1269 int initial_capacity_;
1270
1271 // The start address of the space.
1272 Address start_;
1273 // Used to govern object promotion during mark-compact collection.
1274 Address age_mark_;
1275
1276 // Masks and comparison values to test for containment in this semispace.
1277 uintptr_t address_mask_;
1278 uintptr_t object_mask_;
1279 uintptr_t object_expected_;
1280
1281 bool committed_;
1282
1283 public:
1284 TRACK_MEMORY("SemiSpace")
1285};
1286
1287
1288// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1289// semispace of the heap's new space. It iterates over the objects in the
1290// semispace from a given start address (defaulting to the bottom of the
1291// semispace) to the top of the semispace. New objects allocated after the
1292// iterator is created are not iterated.
1293class SemiSpaceIterator : public ObjectIterator {
1294 public:
1295 // Create an iterator over the objects in the given space. If no start
1296 // address is given, the iterator starts from the bottom of the space. If
1297 // no size function is given, the iterator calls Object::Size().
1298 explicit SemiSpaceIterator(NewSpace* space);
1299 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1300 SemiSpaceIterator(NewSpace* space, Address start);
1301
Steve Blocka7e24c12009-10-30 11:49:00 +00001302 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001303 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001304
1305 HeapObject* object = HeapObject::FromAddress(current_);
1306 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1307
1308 current_ += size;
1309 return object;
1310 }
1311
1312 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001313 virtual HeapObject* next_object() { return next(); }
1314
1315 private:
1316 void Initialize(NewSpace* space, Address start, Address end,
1317 HeapObjectCallback size_func);
1318
1319 // The semispace.
1320 SemiSpace* space_;
1321 // The current iteration point.
1322 Address current_;
1323 // The end of iteration.
1324 Address limit_;
1325 // The callback function.
1326 HeapObjectCallback size_func_;
1327};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual int Size() { return static_cast<int>(top() - bottom()); }

  // Return the current capacity of a semispace.
  int Capacity() {
    ASSERT(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  int CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }

  // Return the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. ANDing an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

  virtual bool ReserveSpace(int bytes);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

 private:
  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
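

// Worked example of the NewSpace::Contains(Address) test above (numbers are
// purely illustrative): suppose the reservation starts at
// start_ == 0x20000000 and address_mask_ == 0xffc00000, i.e. the mask clears
// the low 22 bits that address within a 4 MB reservation. Then for
// a == 0x20123456, (a & address_mask_) == 0x20000000 == start_, so
// Contains(a) is true, while an address outside the reservation keeps a
// different high-bit prefix after the AND and fails the test.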


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field.
  inline Address next();
  inline void set_next(Address next);

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
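

// Illustrative sketch of how a FreeListNode overlays a dead block (the names
// "block_start", "block_size" and "old_head" are hypothetical, not part of
// this header):
//
//   FreeListNode* node = FreeListNode::FromAddress(block_start);
//   node->set_size(block_size);   // writes a map word so the block looks
//                                 // like a heap object to heap iteration
//   node->set_next(old_head);     // raw address of the next free block
//
// The block's previous contents are destroyed; only the free list's own
// bookkeeping lives there afterwards.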


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);

 private:
  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list. Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list. Real elements are in the open
  // interval ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd. Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};
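

// Illustrative sketch of the OldSpaceFreeList protocol (names and sizes are
// made up; "free_list" is some OldSpaceFreeList). Freed blocks go in via
// Free() and come back out via Allocate(), with internal fragmentation
// reported as wasted bytes:
//
//   int waste = free_list.Free(block_start, 48);   // 48-byte dead block
//   ...
//   int wasted_bytes;
//   Object* result = free_list.Allocate(40, &wasted_bytes);
//   if (result->IsFailure()) {
//     // no suitable block was available
//   }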


// The free list for the map space.
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list. Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed. The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed-size block from the free list. The block is
  // uninitialized. A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
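

// Illustrative sketch of FixedSizeFreeList usage (hypothetical names): every
// block on this list has the same size, object_size_, so Free() takes only a
// start address and Allocate() takes no size at all:
//
//   fixed_free_list.Free(dead_block_start);
//   Object* result = fixed_free_list.Allocate();
//   if (!result->IsFailure()) {
//     // result is an uninitialized block of object_size_ bytes
//   }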


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  int AvailableFree() { return free_list_.available(); }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the block to the free list.
  void Free(Address start, int size_in_bytes, bool add_to_freelist) {
    accounting_stats_.DeallocateBytes(size_in_bytes);

    if (add_to_freelist) {
      int wasted_bytes = free_list_.Free(start, size_in_bytes);
      accounting_stats_.WasteBytes(wasted_bytes);
    }
  }

  // Prepare for full garbage collection. Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};
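

// Illustrative sketch of OldSpace::Free above (hypothetical names): the
// add_to_freelist flag separates "make this memory reusable" from "only fix
// up the accounting":
//
//   old_space->Free(start, size_in_bytes, true);     // block becomes
//                                                    // allocatable again
//   old_space->Free(start2, size_in_bytes2, false);  // accounting only;
//                                                    // nothing is linked
//                                                    // into the free list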


// -----------------------------------------------------------------------------
// Old space for objects of a fixed size

class FixedSpace : public PagedSpace {
 public:
  FixedSpace(int max_capacity,
             AllocationSpace id,
             int object_size_in_bytes,
             const char* name)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
        object_size_in_bytes_(object_size_in_bytes),
        name_(name),
        free_list_(id, object_size_in_bytes) {
    page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) {
    return page->ObjectAreaEnd() - page_extra_;
  }

  int object_size_in_bytes() { return object_size_in_bytes_; }

  // Give a fixed-size block of memory to the space's free list.
  // If add_to_freelist is false, only the accounting stats are updated and
  // no attempt is made to add the block to the free list.
  void Free(Address start, bool add_to_freelist) {
    if (add_to_freelist) {
      free_list_.Free(start);
    }
    accounting_stats_.DeallocateBytes(object_size_in_bytes_);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

  virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();

  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

  void ResetFreeList() {
    free_list_.Reset();
  }

 private:
  // The size of objects in this space.
  int object_size_in_bytes_;

  // The name of this space.
  const char* name_;

  // The space's free list.
  FixedSizeFreeList free_list_;
};
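

// Worked example of page_extra_ in the FixedSpace constructor above (the
// numbers are illustrative, not the real constants): if
// Page::kObjectAreaSize were 7936 bytes and object_size_in_bytes were 88,
// then page_extra_ = 7936 % 88 = 16, so PageAllocationLimit() stops linear
// allocation 16 bytes short of ObjectAreaEnd() and no partial object is
// ever started at the end of a page.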


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(int max_capacity, int max_map_space_pages, AllocationSpace id)
      : FixedSpace(max_capacity, id, Map::kSize, "map"),
        max_map_space_pages_(max_map_space_pages) {
    ASSERT(max_map_space_pages < kMaxMapPageIndex);
  }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;

  // Are map pointers encodable into the map word?
  bool MapPointersEncodable() {
    if (!FLAG_use_big_map_space) {
      ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
      return true;
    }
    return CountPagesToTop() <= max_map_space_pages_;
  }

  // Should be called after forced sweep to find out if map space needs
  // compaction.
  bool NeedsCompaction(int live_maps) {
    return !MapPointersEncodable() && live_maps <= CompactionThreshold();
  }

  Address TopAfterCompaction(int live_maps) {
    ASSERT(NeedsCompaction(live_maps));

    int pages_left = live_maps / kMapsPerPage;
    PageIterator it(this, PageIterator::ALL_PAGES);
    while (pages_left-- > 0) {
      ASSERT(it.has_next());
      it.next()->ClearRSet();
    }
    ASSERT(it.has_next());
    Page* top_page = it.next();
    top_page->ClearRSet();
    ASSERT(top_page->is_valid());

    int offset = live_maps % kMapsPerPage * Map::kSize;
    Address top = top_page->ObjectAreaStart() + offset;
    ASSERT(top < top_page->ObjectAreaEnd());
    ASSERT(Contains(top));

    return top;
  }

  void FinishCompaction(Address new_top, int live_maps) {
    Page* top_page = Page::FromAddress(new_top);
    ASSERT(top_page->is_valid());

    SetAllocationInfo(&allocation_info_, top_page);
    allocation_info_.top = new_top;

    int new_size = live_maps * Map::kSize;
    accounting_stats_.DeallocateBytes(accounting_stats_.Size());
    accounting_stats_.AllocateBytes(new_size);

#ifdef DEBUG
    if (FLAG_enable_slow_asserts) {
      intptr_t actual_size = 0;
      for (Page* p = first_page_; p != top_page; p = p->next_page())
        actual_size += kMapsPerPage * Map::kSize;
      actual_size += (new_top - top_page->ObjectAreaStart());
      ASSERT(accounting_stats_.Size() == actual_size);
    }
#endif

    Shrink();
    ResetFreeList();
  }

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

  // An array of page start addresses in a map space.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};
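

// Worked example of the arithmetic in MapSpace::TopAfterCompaction above
// (numbers are illustrative): with kMapsPerPage == 90 and live_maps == 200,
// the loop skips 200 / 90 == 2 pages that stay completely full of maps, and
// the new allocation top lands in the third page at offset
// (200 % 90) * Map::kSize == 20 * Map::kSize from ObjectAreaStart().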


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class CellSpace : public FixedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset within its page.
// Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page. The allocated chunk size is set in the output
  // parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words). Used by
  // LargeObjectSpace::Available. Note that this can overestimate the size
  // of an object that will fit in a chunk: if the object requires extra
  // remembered set words (e.g., for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};
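

// Worked example of LargeObjectChunk::ObjectSizeFor above (the page
// constants are assumed values for illustration): with
// Page::kPageSize == 8192 and Page::kObjectStartOffset == 256, a chunk of
// 65536 bytes can hold an object of at most 65536 - 8192 - 256 == 57088
// bytes, while any chunk of 8448 bytes or less reports 0, i.e. no usable
// object space.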


class LargeObjectSpace : public Space {
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  virtual int Size() {
    return size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, so it may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

  // See the comments for ReserveSpace in the Space class. This has to be
  // called after ReserveSpace has been called on the paged spaces, since they
  // may use some memory, leaving less for large objects.
  virtual bool ReserveSpace(int bytes);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Checks whether an address is in the object area in this space. It
  // iterates all objects in the space, so it may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;  // allocated bytes
  int page_count_;  // number of chunks

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // required for extra_object_bytes of extra pointers (in bytes).
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
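

// Illustrative sketch of LargeObjectSpace usage (hypothetical names; error
// handling elided): each successful allocation creates one chunk holding
// exactly one object, which can later be looked up by address:
//
//   Object* result = lo_space->AllocateRawFixedArray(size_in_bytes);
//   if (!result->IsFailure()) {
//     HeapObject* object = HeapObject::cast(result);
//     ASSERT(lo_space->Contains(object));  // O(1) check
//     ASSERT(lo_space->FindObject(object->address()) == object);
//   }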


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};


} }  // namespace v8::internal

#endif  // V8_SPACES_H_