blob: 4760a424024af0be225685bf47e3728b33d3b89c [file] [log] [blame]
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001// Copyright 2006-2008 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "list-inl.h"
32#include "log.h"
33
kasperl@chromium.org71affb52009-05-26 05:44:31 +000034namespace v8 {
35namespace internal {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000036
37// -----------------------------------------------------------------------------
38// Heap structures:
39//
40// A JS heap consists of a young generation, an old generation, and a large
41// object space. The young generation is divided into two semispaces. A
42// scavenger implements Cheney's copying algorithm. The old generation is
43// separated into a map space and an old object space. The map space contains
44// all (and only) map objects, the rest of old objects go into the old space.
45// The old generation is collected by a mark-sweep-compact collector.
46//
47// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header, a remembered
49// set area, and an object area. A page size is deliberately chosen as 8K
50// bytes. The first word of a page is an opaque page header that has the
51// address of the next page and its ownership information. The second word may
52// have the allocation top address of this page. The next 248 bytes are
53// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
54// remembered set bit corresponds to a pointer in the object area.
55//
56// There is a separate large object space for objects larger than
57// Page::kMaxHeapObjectSize, so that they do not have to move during
58// collection. The large object space is paged and uses the same remembered
59// set implementation. Pages in large object space may be larger than 8K.
60//
61// NOTE: The mark-compact collector rebuilds the remembered set after a
62// collection. It reuses first a few words of the remembered set for
63// bookkeeping relocation information.
64
65
66// Some assertion macros used in the debugging mode.
67
68#define ASSERT_PAGE_ALIGNED(address) \
69 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
70
71#define ASSERT_OBJECT_ALIGNED(address) \
72 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
73
74#define ASSERT_OBJECT_SIZE(size) \
75 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
76
77#define ASSERT_PAGE_OFFSET(offset) \
78 ASSERT((Page::kObjectStartOffset <= offset) \
79 && (offset <= Page::kPageSize))
80
81#define ASSERT_MAP_PAGE_INDEX(index) \
82 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
83
84
85class PagedSpace;
86class MemoryAllocator;
kasper.lund7276f142008-07-30 08:49:36 +000087class AllocationInfo;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000088
89// -----------------------------------------------------------------------------
90// A page normally has 8K bytes. Large object pages may be larger. A page
91// address is always aligned to the 8K page size. A page is divided into
92// three areas: the first two words are used for bookkeeping, the next 248
93// bytes are used as remembered set, and the rest of the page is the object
94// area.
95//
kasperl@chromium.org86f77b72009-07-06 08:21:57 +000096// Pointers are aligned to the pointer size (4), only 1 bit is needed
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000097// for a pointer in the remembered set. Given an address, its remembered set
98// bit position (offset from the start of the page) is calculated by dividing
99// its page offset by 32. Therefore, the object area in a page starts at the
100// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
101// the first two words (64 bits) in a page can be used for other purposes.
sgjesse@chromium.orgb9d7da12009-08-05 08:38:10 +0000102//
103// On the 64-bit platform, we add an offset to the start of the remembered set,
104// and pointers are aligned to 8-byte pointer size. This means that we need
105// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
106// For this reason we add an offset to get room for the Page data at the start.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000107//
108// The mark-compact collector transforms a map pointer into a page index and a
109// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
110// 8K) in total. Because a map pointer is aligned to the pointer size (4
111// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
112// page index + 11 for the offset in the page) are required to encode a map
113// pointer.
114//
115// The only way to get a page pointer is by calling factory methods:
116// Page* p = Page::FromAddress(addr); or
117// Page* p = Page::FromAllocationTop(top);
// A page object overlays the raw memory of a heap page: `this` is the page's
// start address, and the data members below describe the page header layout.
// Pages must therefore never be constructed directly; the only way to get a
// page pointer is by calling the factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    // Masking off the low bits yields the page-aligned start address.
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Returns the start address of the remembered set area.
  Address RSetStart() { return address() + kRSetStartOffset; }

  // Returns the end address of the remembered set area (exclusive).
  Address RSetEnd() { return address() + kRSetEndOffset; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page is a large object page. Large object pages have the
  // low-order bit of the second header word cleared (see is_normal_page).
  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = a - address();
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Remembered set support

  // Clears the remembered set in this page.
  inline void ClearRSet();

  // Returns the address of the remembered set word corresponding to an
  // object address/offset pair, and the bit encoded as a single-bit
  // mask in the output parameter 'bitmask'.
  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
                                               uint32_t* bitmask));

  // Sets the corresponding remembered set bit for a given address.
  INLINE(static void SetRSet(Address address, int offset));

  // Clears the corresponding remembered set bit for a given address.
  static inline void UnsetRSet(Address address, int offset);

  // Checks whether the remembered set bit for a given address is set.
  static inline bool IsRSetSet(Address address, int offset);

#ifdef DEBUG
  // Use a state to mark whether remembered set space can be used for other
  // purposes (the mark-compact collector reuses it for bookkeeping).
  enum RSetState { IN_USE, NOT_IN_USE };
  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
  static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif

  // 8K bytes per page.
  static const int kPageSizeBits = 13;

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  // The offset of the remembered set in a page, in addition to the empty bytes
  // formed as the remembered bits of the remembered set itself.
#ifdef V8_TARGET_ARCH_X64
  static const int kRSetOffset = 4 * kPointerSize;  // Room for four pointers.
#else
  static const int kRSetOffset = 0;
#endif
  // The end offset of the remembered set in a page
  // (heaps are aligned to pointer size).
  static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;

  // The start offset of the object area in a page.
  // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
  // to align the start of the rset to a uint32_t address.
  static const int kObjectStartOffset = 256;

  // The start offset of the used part of the remembered set in a page.
  static const int kRSetStartOffset = kRSetOffset +
      kObjectStartOffset / kBitsPerPointer;

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  int is_normal_page;

  // The following fields may overlap with the remembered set; they can only
  // be used in the mark-compact collector when the remembered set is not
  // used.

  // The index of the page in its owner space.
  int mc_page_index;

  // The allocation pointer after relocating objects to this page.
  Address mc_relocation_top;

  // The forwarding address of the first live object in this page.
  Address mc_first_forwarded;

#ifdef DEBUG
 private:
  static RSetState rset_state_;  // state of the remembered set
#endif
};
288
289
290// ----------------------------------------------------------------------------
kasper.lund7276f142008-07-30 08:49:36 +0000291// Space is the abstract superclass for all allocation spaces.
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  // id identifies the space (used in error reporting); executable indicates
  // whether the space must be backed by executable memory.
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns the number of allocated bytes in the space.
  virtual int Size() = 0;

#ifdef DEBUG
  // Prints debugging information about the space.
  virtual void Print() = 0;
#endif

 private:
  AllocationSpace id_;
  Executability executable_;
};
315
316
317// ----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000318// A space acquires chunks of memory from the operating system. The memory
319// allocator manages chunks for the paged heap spaces (old space and map
320// space). A paged chunk consists of pages. Pages in a chunk have contiguous
321// addresses and are linked as a list.
322//
323// The allocator keeps an initial chunk which is used for the new space. The
324// leftover regions of the initial chunk are used for the initial chunks of
325// old space and map space if they are big enough to hold at least one page.
326// The allocator assumes that there is one old space and one map space, each
327// expands the space by allocating kPagesPerChunk pages except the last
328// expansion (before running out of space). The first chunk may contain fewer
329// than kPagesPerChunk pages as well.
330//
331// The memory allocator also allocates chunks for the large object space, but
332// they are managed by the space itself. The new space does not expand.
333
class MemoryAllocator : public AllStatic {
 public:
  // Initializes its internal bookkeeping structures.
  // max_capacity is the maximum capacity in bytes of the total space.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else NULL if unsuccessful and leaves the
  // initial chunk NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commits a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommits a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  static bool UncommitBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory for the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. If 'p' is the first page
  // of a chunk, pages from 'p' are freed and this function returns an
  // invalid page pointer. Otherwise, the function searches a page
  // after 'p' that is the first page of a chunk. Pages after the
  // found page are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Allocates and frees raw memory of certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of heap.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf, size_t length);

  // Returns the maximum available bytes of heaps.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated space in bytes.
  static int Size() { return size_; }

  // Returns maximum available bytes that the old space can have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  static void ReportStatistics();
#endif

  // Due to encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
  // If a chunk has at least 32 pages, the maximum heap size is about
  // 8 * 1024 * 32 * 8K = 2G bytes.
#if defined(ANDROID)
  static const int kPagesPerChunk = 16;
#else
  static const int kPagesPerChunk = 64;
#endif
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Push/pop a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);
};
519
520
521// -----------------------------------------------------------------------------
522// Interface for heap object iterator to be implemented by all object space
523// object iterators.
524//
// NOTE: The space-specific object iterators also implement their own
// has_next() and next() methods, which are used to avoid virtual function
// calls when iterating a specific space.
528
// Abstract interface implemented by all object-space iterators.
class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  // Returns true if there are more objects to iterate over.
  virtual bool has_next_object() = 0;
  // Returns the next object and advances the iterator.
  virtual HeapObject* next_object() = 0;
};
536
537
538// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000539// Heap object iterator in new/old/map spaces.
540//
541// A HeapObjectIterator iterates objects from a given address to the
542// top of a space. The given address must be below the current
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +0000543// allocation pointer (space top). There are some caveats.
544//
545// (1) If the space top changes upward during iteration (because of
546// allocating new objects), the iterator does not iterate objects
547// above the original space top. The caller must create a new
548// iterator starting from the old top in order to visit these new
549// objects.
550//
551// (2) If new objects are allocated below the original allocation top
552// (e.g., free-list allocation in paged spaces), the new objects
553// may or may not be iterated depending on their position with
554// respect to the current point of iteration.
555//
556// (3) The space top should not change downward during iteration,
557// otherwise the iterator will return not-necessarily-valid
558// objects.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000559
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);

  // Non-virtual iteration interface; preferred when the concrete type is
  // known, to avoid virtual dispatch.
  inline bool has_next();
  inline HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;  // current iteration point
  Address end_addr_;  // end iteration point
  Address cur_limit_;  // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;  // caches the page of the end address

  // Slow path of has_next; checks whether there are more objects in
  // the next page.
  bool HasNextInNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
599
600
601// -----------------------------------------------------------------------------
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +0000602// A PageIterator iterates the pages in a paged space.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000603//
604// The PageIterator class provides three modes for iterating pages in a space:
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +0000605// PAGES_IN_USE iterates pages containing allocated objects.
606// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
607// mark-compact collection.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000608// ALL_PAGES iterates all pages in the space.
kasperl@chromium.orgb3284ad2009-05-18 06:12:45 +0000609//
610// There are some caveats.
611//
612// (1) If the space expands during iteration, new pages will not be
613// returned by the iterator in any mode.
614//
615// (2) If new objects are allocated during iteration, they will appear
616// in pages returned by the iterator. Allocation may cause the
617// allocation pointer or MC allocation pointer in the last page to
618// change between constructing the iterator and iterating the last
619// page.
620//
621// (3) The space should not shrink during iteration, otherwise the
622// iterator will return deallocated pages.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000623
class PageIterator BASE_EMBEDDED {
 public:
  // Iteration mode (see the caveats in the comment above).
  enum Mode {
    PAGES_IN_USE,       // Pages containing allocated objects.
    PAGES_USED_BY_MC,   // Pages holding relocated objects during mark-compact.
    ALL_PAGES           // All pages in the space.
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};
642
643
644// -----------------------------------------------------------------------------
645// A space has a list of pages. The next page can be accessed via
646// Page::next_page() call. The next page of the last page is an
647// invalid page pointer. A space can expand and shrink dynamically.
648
649// An abstraction of allocation and relocation pointers in a page-structured
650// space.
class AllocationInfo {
 public:
  Address top;  // Current allocation top.
  Address limit;  // Current allocation limit.

#ifdef DEBUG
  // Sanity check: top and limit must lie in the same page and top must
  // not be past limit.
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};
663
664
665// An abstraction of the accounting statistics of a page-structured space.
666// The 'capacity' of a space is the number of object-area bytes (ie, not
667// including page bookkeeping structures) currently in the space. The 'size'
668// of a space is the number of allocated bytes, the 'waste' in the space is
669// the number of bytes that are not allocated and not available to
670// allocation without reorganizing the space via a GC (eg, small blocks due
671// to internal fragmentation, top of page areas in map space), and the bytes
672// 'available' is the number of unallocated bytes that are not waste. The
673// capacity is the sum of size, waste, and available.
674//
675// The stats are only set by functions that ensure they stay balanced. These
676// functions increase or decrease one of the non-capacity stats in
677// conjunction with capacity, or else they always balance increases and
678// decreases to the non-capacity stats.
679class AllocationStats BASE_EMBEDDED {
680 public:
681 AllocationStats() { Clear(); }
682
683 // Zero out all the allocation statistics (ie, no capacity).
684 void Clear() {
685 capacity_ = 0;
686 available_ = 0;
687 size_ = 0;
688 waste_ = 0;
689 }
690
691 // Reset the allocation statistics (ie, available = capacity with no
692 // wasted or allocated bytes).
693 void Reset() {
694 available_ = capacity_;
695 size_ = 0;
696 waste_ = 0;
697 }
698
699 // Accessors for the allocation statistics.
700 int Capacity() { return capacity_; }
701 int Available() { return available_; }
702 int Size() { return size_; }
703 int Waste() { return waste_; }
704
705 // Grow the space by adding available bytes.
706 void ExpandSpace(int size_in_bytes) {
707 capacity_ += size_in_bytes;
708 available_ += size_in_bytes;
709 }
710
711 // Shrink the space by removing available bytes.
712 void ShrinkSpace(int size_in_bytes) {
713 capacity_ -= size_in_bytes;
714 available_ -= size_in_bytes;
715 }
716
717 // Allocate from available bytes (available -> size).
718 void AllocateBytes(int size_in_bytes) {
719 available_ -= size_in_bytes;
720 size_ += size_in_bytes;
721 }
722
723 // Free allocated bytes, making them available (size -> available).
724 void DeallocateBytes(int size_in_bytes) {
725 size_ -= size_in_bytes;
726 available_ += size_in_bytes;
727 }
728
729 // Waste free bytes (available -> waste).
730 void WasteBytes(int size_in_bytes) {
731 available_ -= size_in_bytes;
732 waste_ += size_in_bytes;
733 }
734
735 // Consider the wasted bytes to be allocated, as they contain filler
736 // objects (waste -> size).
737 void FillWastedBytes(int size_in_bytes) {
738 waste_ -= size_in_bytes;
739 size_ += size_in_bytes;
740 }
741
742 private:
743 int capacity_;
744 int available_;
745 int size_;
746 int waste_;
747};
748
749
// Abstract base class for old-generation spaces whose memory is organized
// as a linked list of pages.
class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address, the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether page is currently in use by this space.
  bool IsUsed(Page* page);

  // Clears remembered sets of pages in this space.
  void ClearRSet();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact) = 0;

  // Returns the allocation top of the given page; where that top lies is
  // space-dependent, so each concrete space supplies its own definition.
  virtual Address PageAllocationTop(Page* page) = 0;

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation and not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  inline Object* MCAllocateRaw(int size_in_bytes);


  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Set the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }

  // Computes the offset of a given address in this space to the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // The last page in this space. Initially set in Setup, updated in
  // Expand and Shrink.
  Page* last_page_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Bytes of each page that cannot be allocated. Possibly non-zero
  // for pages in spaces with only fixed-size objects. Always zero
  // for pages in spaces with variable sized objects (those pages are
  // padded with free-list nodes).
  int page_extra_;

  // Sets allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'. Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, roll to the next page in
  // the space (there is assumed to be one) and allocate there. This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw. This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  // Prints remembered-set contents for this space (debug only).
  void DoPrintRSet(const char* space_name);
#endif
 private:
  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();
#endif

  friend class PageIterator;
};
941
942
943#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +0000944class NumberAndSizeInfo BASE_EMBEDDED {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000945 public:
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +0000946 NumberAndSizeInfo() : number_(0), bytes_(0) {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000947
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +0000948 int number() const { return number_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000949 void increment_number(int num) { number_ += num; }
950
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +0000951 int bytes() const { return bytes_; }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000952 void increment_bytes(int size) { bytes_ += size; }
953
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000954 void clear() {
955 number_ = 0;
956 bytes_ = 0;
957 }
958
959 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000960 int number_;
961 int bytes_;
962};
sgjesse@chromium.org0b6db592009-07-30 14:48:31 +0000963
964
965// HistogramInfo class for recording a single "bar" of a histogram. This
966// class is used for collecting statistics to print to stdout (when compiled
967// with DEBUG) or to the log file (when compiled with
968// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  // Label of this histogram bar.
  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  // Bar label. Not owned: the pointed-to string must outlive this object
  // (presumably a string literal — confirm against callers).
  const char* name_;
};
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000979#endif
980
981
982// -----------------------------------------------------------------------------
983// SemiSpace in young generation
984//
985// A semispace is a contiguous chunk of memory. The mark-compact collector
986// uses the memory in the from space as a marking stack when tracing live
987// objects.
988
class SemiSpace : public Space {
 public:
  // Constructor. The semispace starts out uninitialized; call Setup
  // before use (HasBeenSetup() reports false until then).
  SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Grow the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Grow();

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors. The age mark is used to govern object promotion
  // during mark-compact collection.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) { return addr - low(); }

  // If we don't have this here then SemiSpace will be abstract. However
  // it should never be called.
  virtual int Size() {
    UNREACHABLE();
    return 0;
  }

  // Commit/uncommit the semispace's backing memory; the bool result
  // reports whether the operation succeeded.
  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

  // Returns the current capacity of the semi space.
  int Capacity() { return capacity_; }

 private:
  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Whether the semispace's memory is currently committed.
  bool committed_;

 public:
  TRACK_MEMORY("SemiSpace")
};
1077
1078
1079// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1080// semispace of the heap's new space. It iterates over the objects in the
1081// semispace from a given start address (defaulting to the bottom of the
1082// semispace) to the top of the semispace. New objects allocated after the
1083// iterator is created are not iterated.
1084class SemiSpaceIterator : public ObjectIterator {
1085 public:
1086 // Create an iterator over the objects in the given space. If no start
1087 // address is given, the iterator starts from the bottom of the space. If
1088 // no size function is given, the iterator calls Object::Size().
1089 explicit SemiSpaceIterator(NewSpace* space);
1090 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1091 SemiSpaceIterator(NewSpace* space, Address start);
1092
1093 bool has_next() {return current_ < limit_; }
1094
1095 HeapObject* next() {
1096 ASSERT(has_next());
1097
1098 HeapObject* object = HeapObject::FromAddress(current_);
1099 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001100
1101 current_ += size;
1102 return object;
1103 }
1104
1105 // Implementation of the ObjectIterator functions.
1106 virtual bool has_next_object() { return has_next(); }
1107 virtual HeapObject* next_object() { return next(); }
1108
1109 private:
1110 void Initialize(NewSpace* space, Address start, Address end,
1111 HeapObjectCallback size_func);
1112
1113 // The semispace.
1114 SemiSpace* space_;
1115 // The current iteration point.
1116 Address current_;
1117 // The end of iteration.
1118 Address limit_;
1119 // The callback function.
1120 HeapObjectCallback size_func_;
1121};
1122
1123
1124// -----------------------------------------------------------------------------
1125// The young generation space.
1126//
1127// The new space consists of a contiguous pair of semispaces. It simply
1128// forwards most functions to the appropriate semispace.
1129
class NewSpace : public Space {
 public:
  // Constructor.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces (to-space becomes from-space and vice versa).
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity. Returns a flag indicating success or failure.
  bool Grow();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual int Size() { return top() - bottom(); }
  // Return the current capacity of a semispace.
  int Capacity() { return capacity_; }
  // Return the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  // Allocate the requested number of bytes in the active semispace if
  // possible, return a failure object if not.
  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

  // Commit the inactive semispace if it is not committed yet. Returns
  // whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  // Uncommit the inactive semispace if it is committed. Returns whether
  // the operation succeeded.
  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

 private:
  // The current and maximum capacities of a semispace.
  int capacity_;
  int maximum_capacity_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
1311
1312
1313// -----------------------------------------------------------------------------
1314// Free lists for old object spaces
1315//
1316// Free-list nodes are free blocks in the heap. They look like heap objects
1317// (free-list node pointers have the heap object tag, and they have a map like
1318// a heap object). They have a size and a next pointer. The next pointer is
1319// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field (the raw address of the next free-list
  // node, or NULL).
  inline Address next();
  inline void set_next(Address next);

 private:
  // Offset of the 'next' field: pointer-size aligned just past a byte-array
  // header, suggesting the node masquerades as a ByteArray — confirm against
  // the map written by set_size in spaces.cc.
  static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
1344
1345
// The free list for the old space. Blocks of arbitrary (word-multiple) size
// are kept on exact-size lists, with an ordered index of non-empty sizes to
// locate a best fit quickly.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);

 private:
  // The size range of blocks, in bytes. (Smaller allocations are allowed, but
  // will always result in waste.)
  static const int kMinBlockSize = 2 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in words.
  // The available sizes are kept in an increasingly ordered list. Entries
  // corresponding to sizes < kMinBlockSize always have an empty free list
  // (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  // One entry per possible block size in words, plus one for the sentinel.
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd. Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    // Unlink 'index' and park the finger at its predecessor so the next
    // search for a similar size starts close by.
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};
1446
1447
// The free list for fixed-size objects (used by the map and cell spaces).
// Every block on the list has the same size, object_size_, so a simple
// singly-linked list suffices and nothing is lost to fragmentation.
class FixedSizeFreeList BASE_EMBEDDED {
 public:
  FixedSizeFreeList(AllocationSpace owner, int object_size);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block starting at 'start' (assumed to
  // have size object_size_) is placed on the free list. Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed. The start address should be word aligned.
  void Free(Address start);

  // Allocate a fixed sized block from the free list. The block is
  // uninitialized. A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // The size of the objects in this space.
  int object_size_;

  DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
};
1485
1486
// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
    // Old-space pages have no unusable tail; objects may end flush with the
    // object area end.
    page_extra_ = 0;
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space. Undefined if page is unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  void Free(Address start, int size_in_bytes) {
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }

  // Prepare for full garbage collection. Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

 public:
  TRACK_MEMORY("OldSpace")
};
1548
1549
1550// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001551// Old space for objects of a fixed size
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001552
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001553class FixedSpace : public PagedSpace {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001554 public:
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001555 FixedSpace(int max_capacity,
1556 AllocationSpace id,
1557 int object_size_in_bytes,
1558 const char* name)
1559 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1560 object_size_in_bytes_(object_size_in_bytes),
1561 name_(name),
1562 free_list_(id, object_size_in_bytes) {
1563 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1564 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001565
kasper.lund7276f142008-07-30 08:49:36 +00001566 // The top of allocation in a page in this space. Undefined if page is unused.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001567 virtual Address PageAllocationTop(Page* page) {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001568 return page == TopPageOf(allocation_info_) ? top()
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001569 : page->ObjectAreaEnd() - page_extra_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001570 }
1571
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001572 int object_size_in_bytes() { return object_size_in_bytes_; }
1573
1574 // Give a fixed sized block of memory to the space's free list.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001575 void Free(Address start) {
1576 free_list_.Free(start);
1577 accounting_stats_.DeallocateBytes(Map::kSize);
1578 }
1579
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001580 // Prepares for a mark-compact GC.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001581 virtual void PrepareForMarkCompact(bool will_compact);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001582
1583 // Updates the allocation pointer to the relocation top after a mark-compact
1584 // collection.
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001585 virtual void MCCommitRelocationInfo();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001586
1587#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001588 // Reports statistic info of the space
1589 void ReportStatistics();
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001590
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001591 // Dump the remembered sets in the space to stdout.
1592 void PrintRSet();
1593#endif
1594
kasper.lund7276f142008-07-30 08:49:36 +00001595 protected:
1596 // Virtual function in the superclass. Slow path of AllocateRaw.
1597 HeapObject* SlowAllocateRaw(int size_in_bytes);
1598
1599 // Virtual function in the superclass. Allocate linearly at the start of
1600 // the page after current_page (there is assumed to be one).
1601 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1602
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001603 private:
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001604 // The size of objects in this space.
1605 int object_size_in_bytes_;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001606
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001607 // The name of this space.
1608 const char* name_;
1609
1610 // The space's free list.
1611 FixedSizeFreeList free_list_;
1612};
1613
1614
// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public FixedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(int max_capacity, AllocationSpace id)
      : FixedSpace(max_capacity, id, Map::kSize, "map") {}

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // Constants. The maximum page index representable in a MapWord's
  // compact map-page encoding bounds the number of map pages.
  static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;

 protected:
#ifdef DEBUG
  virtual void VerifyObject(HeapObject* obj);
#endif

 private:
  // An array of page start addresses in a map space, indexed by page index.
  Address page_addresses_[kMaxMapPageIndex + 1];

 public:
  TRACK_MEMORY("MapSpace")
};
1645
1646
1647// -----------------------------------------------------------------------------
kasperl@chromium.orgdefbd102009-07-13 14:04:26 +00001648// Old space for all global object property cell objects
1649
1650class CellSpace : public FixedSpace {
1651 public:
1652 // Creates a property cell space object with a maximum capacity.
1653 CellSpace(int max_capacity, AllocationSpace id)
1654 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
1655
1656 protected:
1657#ifdef DEBUG
1658 virtual void VerifyObject(HeapObject* obj);
1659#endif
1660
1661 public:
1662 TRACK_MEMORY("MapSpace")
1663};
1664
1665
// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at Page::kObjectStartOffset to a page.
// Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page. The allocated chunk size is set in the output
  // parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words). Used by
  // LargeObjectSpace::Available. Note that this can overestimate the size
  // of object that will fit in a chunk---if the object requires extra
  // remembered set words (eg, for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    // Chunks no larger than the fixed overhead cannot hold any object.
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};
1729
1730
// The space holding all large objects, kept as a linked list of
// LargeObjectChunks (one object per chunk). Objects here never move.
class LargeObjectSpace : public Space {
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  // Total allocated bytes in this space.
  virtual int Size() {
    return size_;
  }

  // Number of chunks (one page each) in this space.
  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif
  // Checks whether an address is in the object area in this space. It
  // iterates all objects in the space. May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;  // allocated bytes
  int page_count_;  // number of chunks


  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // required for extra_object_bytes of extra pointers (in bytes).
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

  // The iterator walks first_chunk_ directly.
  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
1823
1824
// Iterates over the objects of a large object space by walking its chunk
// list (one object per chunk).
class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  // True while there are more objects to visit.
  bool has_next() { return current_ != NULL; }
  HeapObject* next();

  // implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  // The chunk holding the next object to return, or NULL when exhausted.
  LargeObjectChunk* current_;
  // Optional per-object size callback; NOTE(review): presumably NULL when
  // the single-argument constructor is used — confirm in spaces.cc.
  HeapObjectCallback size_func_;
};
1841
1842
1843} } // namespace v8::internal
1844
1845#endif // V8_SPACES_H_