blob: 718f8881a0d0683454df7179ee9f950f086434f9 [file] [log] [blame]
ager@chromium.org9258b6b2008-09-11 09:11:10 +00001// Copyright 2006-2008 the V8 project authors. All rights reserved.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00002// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "list-inl.h"
32#include "log.h"
33
34namespace v8 { namespace internal {
35
// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header, a remembered
// set area, and an object area. A page size is deliberately chosen as 8K
// bytes. The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. The next 248 bytes are
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
// remembered set bit corresponds to a pointer in the object area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It reuses the first few words of the remembered set for
// bookkeeping relocation information.
64
// Some assertion macros used in the debugging mode.
// Macro arguments are parenthesized so expressions passed as arguments
// expand safely.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < (size)) && ((size) <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= (offset)) \
      && ((offset) <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= (index)) && ((index) <= MapSpace::kMaxMapPageIndex))


// Forward declarations.
class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +000087
88// -----------------------------------------------------------------------------
89// A page normally has 8K bytes. Large object pages may be larger. A page
90// address is always aligned to the 8K page size. A page is divided into
91// three areas: the first two words are used for bookkeeping, the next 248
92// bytes are used as remembered set, and the rest of the page is the object
93// area.
94//
95// Pointers are aligned to the pointer size (4 bytes), only 1 bit is needed
96// for a pointer in the remembered set. Given an address, its remembered set
97// bit position (offset from the start of the page) is calculated by dividing
98// its page offset by 32. Therefore, the object area in a page starts at the
99// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
100// the first two words (64 bits) in a page can be used for other purposes.
101//
102// The mark-compact collector transforms a map pointer into a page index and a
103// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
104// 8K) in total. Because a map pointer is aligned to the pointer size (4
105// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
106// page index + 11 for the offset in the page) are required to encode a map
107// pointer.
108//
109// The only way to get a page pointer is by calling factory methods:
110// Page* p = Page::FromAddress(addr); or
111// Page* p = Page::FromAllocationTop(top);
112class Page {
113 public:
114 // Returns the page containing a given address. The address ranges
115 // from [page_addr .. page_addr + kPageSize[
116 //
117 // Note that this function only works for addresses in normal paged
118 // spaces and addresses in the first 8K of large object pages (ie,
119 // the start of large objects but not necessarily derived pointers
120 // within them).
121 INLINE(static Page* FromAddress(Address a)) {
122 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
123 }
124
125 // Returns the page containing an allocation top. Because an allocation
126 // top address can be the upper bound of the page, we need to subtract
127 // it with kPointerSize first. The address ranges from
128 // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
129 INLINE(static Page* FromAllocationTop(Address top)) {
130 Page* p = FromAddress(top - kPointerSize);
131 ASSERT_PAGE_OFFSET(p->Offset(top));
132 return p;
133 }
134
135 // Returns the start address of this page.
136 Address address() { return reinterpret_cast<Address>(this); }
137
138 // Checks whether this is a valid page address.
139 bool is_valid() { return address() != NULL; }
140
141 // Returns the next page of this page.
142 inline Page* next_page();
143
kasper.lund7276f142008-07-30 08:49:36 +0000144 // Return the end of allocation in this page. Undefined for unused pages.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000145 inline Address AllocationTop();
146
147 // Returns the start address of the object area in this page.
148 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
149
150 // Returns the end address (exclusive) of the object area in this page.
151 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
152
153 // Returns the start address of the remembered set area.
154 Address RSetStart() { return address() + kRSetStartOffset; }
155
156 // Returns the end address of the remembered set area (exclusive).
157 Address RSetEnd() { return address() + kRSetEndOffset; }
158
159 // Checks whether an address is page aligned.
160 static bool IsAlignedToPageSize(Address a) {
161 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
162 }
163
164 // True if this page is a large object page.
165 bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
166
167 // Returns the offset of a given address to this page.
168 INLINE(int Offset(Address a)) {
169 int offset = a - address();
170 ASSERT_PAGE_OFFSET(offset);
171 return offset;
172 }
173
174 // Returns the address for a given offset to the this page.
175 Address OffsetToAddress(int offset) {
176 ASSERT_PAGE_OFFSET(offset);
177 return address() + offset;
178 }
179
180 // ---------------------------------------------------------------------
181 // Remembered set support
182
183 // Clears remembered set in this page.
184 inline void ClearRSet();
185
186 // Return the address of the remembered set word corresponding to an
187 // object address/offset pair, and the bit encoded as a single-bit
188 // mask in the output parameter 'bitmask'.
189 INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
190 uint32_t* bitmask));
191
192 // Sets the corresponding remembered set bit for a given address.
193 INLINE(static void SetRSet(Address address, int offset));
194
195 // Clears the corresponding remembered set bit for a given address.
196 static inline void UnsetRSet(Address address, int offset);
197
198 // Checks whether the remembered set bit for a given address is set.
199 static inline bool IsRSetSet(Address address, int offset);
200
201#ifdef DEBUG
202 // Use a state to mark whether remembered set space can be used for other
203 // purposes.
204 enum RSetState { IN_USE, NOT_IN_USE };
205 static bool is_rset_in_use() { return rset_state_ == IN_USE; }
206 static void set_rset_state(RSetState state) { rset_state_ = state; }
207#endif
208
209 // 8K bytes per page.
210 static const int kPageSizeBits = 13;
211
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000212 // Page size in bytes. This must be a multiple of the OS page size.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000213 static const int kPageSize = 1 << kPageSizeBits;
214
215 // Page size mask.
216 static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;
217
218 // The end offset of the remembered set in a page
219 // (heaps are aligned to pointer size).
220 static const int kRSetEndOffset= kPageSize / kBitsPerPointer;
221
222 // The start offset of the remembered set in a page.
223 static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer;
224
225 // The start offset of the object area in a page.
226 static const int kObjectStartOffset = kRSetEndOffset;
227
228 // Object area size in bytes.
229 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
230
231 // Maximum object size that fits in a page.
232 static const int kMaxHeapObjectSize = kObjectAreaSize;
233
234 //---------------------------------------------------------------------------
235 // Page header description.
236 //
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000237 // If a page is not in the large object space, the first word,
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000238 // opaque_header, encodes the next page address (aligned to kPageSize 8K)
239 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
240 // opaque_header. The value range of the opaque_header is [0..kPageSize[,
241 // or [next_page_start, next_page_end[. It cannot point to a valid address
242 // in the current page. If a page is in the large object space, the first
243 // word *may* (if the page start and large object chunk start are the
244 // same) contain the address of the next large object chunk.
245 int opaque_header;
246
247 // If the page is not in the large object space, the low-order bit of the
248 // second word is set. If the page is in the large object space, the
249 // second word *may* (if the page start and large object chunk start are
250 // the same) contain the large object chunk size. In either case, the
251 // low-order bit for large object pages will be cleared.
252 int is_normal_page;
253
254 // The following fields overlap with remembered set, they can only
255 // be used in the mark-compact collector when remembered set is not
256 // used.
257
258 // The allocation pointer after relocating objects to this page.
259 Address mc_relocation_top;
260
261 // The index of the page in its owner space.
262 int mc_page_index;
263
264 // The forwarding address of the first live object in this page.
265 Address mc_first_forwarded;
266
267#ifdef DEBUG
268 private:
269 static RSetState rset_state_; // state of the remembered set
270#endif
271};
272
273
274// ----------------------------------------------------------------------------
kasper.lund7276f142008-07-30 08:49:36 +0000275// Space is the abstract superclass for all allocation spaces.
276class Space : public Malloced {
277 public:
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000278 Space(AllocationSpace id, Executability executable)
kasper.lund7276f142008-07-30 08:49:36 +0000279 : id_(id), executable_(executable) {}
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000280 virtual ~Space() {}
kasper.lund7276f142008-07-30 08:49:36 +0000281 // Does the space need executable memory?
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000282 Executability executable() { return executable_; }
kasper.lund7276f142008-07-30 08:49:36 +0000283 // Identity used in error reporting.
284 AllocationSpace identity() { return id_; }
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000285 virtual int Size() = 0;
286#ifdef DEBUG
287 virtual void Verify() = 0;
288 virtual void Print() = 0;
289#endif
kasper.lund7276f142008-07-30 08:49:36 +0000290 private:
291 AllocationSpace id_;
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000292 Executability executable_;
kasper.lund7276f142008-07-30 08:49:36 +0000293};
294
295
296// ----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000297// A space acquires chunks of memory from the operating system. The memory
298// allocator manages chunks for the paged heap spaces (old space and map
299// space). A paged chunk consists of pages. Pages in a chunk have contiguous
300// addresses and are linked as a list.
301//
302// The allocator keeps an initial chunk which is used for the new space. The
303// leftover regions of the initial chunk are used for the initial chunks of
304// old space and map space if they are big enough to hold at least one page.
305// The allocator assumes that there is one old space and one map space, each
306// expands the space by allocating kPagesPerChunk pages except the last
307// expansion (before running out of space). The first chunk may contain fewer
308// than kPagesPerChunk pages as well.
309//
310// The memory allocator also allocates chunks for the large object space, but
311// they are managed by the space itself. The new space does not expand.
312
313class MemoryAllocator : public AllStatic {
314 public:
315 // Initializes its internal bookkeeping structures.
316 // Max capacity of the total space.
317 static bool Setup(int max_capacity);
318
319 // Deletes valid chunks.
320 static void TearDown();
321
322 // Reserves an initial address range of virtual memory to be split between
323 // the two new space semispaces, the old space, and the map space. The
324 // memory is not yet committed or assigned to spaces and split into pages.
325 // The initial chunk is unmapped when the memory allocator is torn down.
326 // This function should only be called when there is not already a reserved
327 // initial chunk (initial_chunk_ should be NULL). It returns the start
328 // address of the initial chunk if successful, with the side effect of
329 // setting the initial chunk, or else NULL if unsuccessful and leaves the
330 // initial chunk NULL.
331 static void* ReserveInitialChunk(const size_t requested);
332
333 // Commits pages from an as-yet-unmanaged block of virtual memory into a
334 // paged space. The block should be part of the initial chunk reserved via
335 // a call to ReserveInitialChunk. The number of pages is always returned in
336 // the output parameter num_pages. This function assumes that the start
337 // address is non-null and that it is big enough to hold at least one
338 // page-aligned page. The call always succeeds, and num_pages is always
339 // greater than zero.
340 static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
341 int* num_pages);
342
343 // Commit a contiguous block of memory from the initial chunk. Assumes that
344 // the address is not NULL, the size is greater than zero, and that the
345 // block is contained in the initial chunk. Returns true if it succeeded
346 // and false otherwise.
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000347 static bool CommitBlock(Address start, size_t size, Executability executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000348
349 // Attempts to allocate the requested (non-zero) number of pages from the
350 // OS. Fewer pages might be allocated than requested. If it fails to
351 // allocate memory for the OS or cannot allocate a single page, this
352 // function returns an invalid page pointer (NULL). The caller must check
353 // whether the returned page is valid (by calling Page::is_valid()). It is
354 // guaranteed that allocated pages have contiguous addresses. The actual
355 // number of allocated page is returned in the output parameter
356 // allocated_pages.
357 static Page* AllocatePages(int requested_pages, int* allocated_pages,
358 PagedSpace* owner);
359
360 // Frees pages from a given page and after. If 'p' is the first page
361 // of a chunk, pages from 'p' are freed and this function returns an
362 // invalid page pointer. Otherwise, the function searches a page
363 // after 'p' that is the first page of a chunk. Pages after the
364 // found page are freed and the function returns 'p'.
365 static Page* FreePages(Page* p);
366
367 // Allocates and frees raw memory of certain size.
368 // These are just thin wrappers around OS::Allocate and OS::Free,
369 // but keep track of allocated bytes as part of heap.
kasper.lund7276f142008-07-30 08:49:36 +0000370 static void* AllocateRawMemory(const size_t requested,
371 size_t* allocated,
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000372 Executability executable);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000373 static void FreeRawMemory(void* buf, size_t length);
374
375 // Returns the maximum available bytes of heaps.
376 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
377
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000378 // Returns maximum available bytes that the old space can have.
379 static int MaxAvailable() {
380 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
381 }
382
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000383 // Links two pages.
384 static inline void SetNextPage(Page* prev, Page* next);
385
386 // Returns the next page of a given page.
387 static inline Page* GetNextPage(Page* p);
388
389 // Checks whether a page belongs to a space.
390 static inline bool IsPageInSpace(Page* p, PagedSpace* space);
391
392 // Returns the space that owns the given page.
393 static inline PagedSpace* PageOwner(Page* page);
394
395 // Finds the first/last page in the same chunk as a given page.
396 static Page* FindFirstPageInSameChunk(Page* p);
397 static Page* FindLastPageInSameChunk(Page* p);
398
399#ifdef DEBUG
400 // Reports statistic info of the space.
401 static void ReportStatistics();
402#endif
403
404 // Due to encoding limitation, we can only have 8K chunks.
405 static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
406 // If a chunk has at least 32 pages, the maximum heap size is about
407 // 8 * 1024 * 32 * 8K = 2G bytes.
408 static const int kPagesPerChunk = 64;
409 static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
410
411 private:
412 // Maximum space size in bytes.
413 static int capacity_;
414
415 // Allocated space size in bytes.
416 static int size_;
417
418 // The initial chunk of virtual memory.
419 static VirtualMemory* initial_chunk_;
420
421 // Allocated chunk info: chunk start address, chunk size, and owning space.
422 class ChunkInfo BASE_EMBEDDED {
423 public:
424 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
425 void init(Address a, size_t s, PagedSpace* o) {
426 address_ = a;
427 size_ = s;
428 owner_ = o;
429 }
430 Address address() { return address_; }
431 size_t size() { return size_; }
432 PagedSpace* owner() { return owner_; }
433
434 private:
435 Address address_;
436 size_t size_;
437 PagedSpace* owner_;
438 };
439
440 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
441 static List<ChunkInfo> chunks_;
442 static List<int> free_chunk_ids_;
443 static int max_nof_chunks_;
444 static int top_;
445
446 // Push/pop a free chunk id onto/from the stack.
447 static void Push(int free_chunk_id);
448 static int Pop();
449 static bool OutOfChunkIds() { return top_ == 0; }
450
451 // Frees a chunk.
452 static void DeleteChunk(int chunk_id);
453
454 // Basic check whether a chunk id is in the valid range.
455 static inline bool IsValidChunkId(int chunk_id);
456
457 // Checks whether a chunk id identifies an allocated chunk.
458 static inline bool IsValidChunk(int chunk_id);
459
460 // Returns the chunk id that a page belongs to.
461 static inline int GetChunkId(Page* p);
462
463 // Initializes pages in a chunk. Returns the first page address.
464 // This function and GetChunkId() are provided for the mark-compact
465 // collector to rebuild page headers in the from space, which is
466 // used as a marking stack and its page headers are destroyed.
467 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
468 PagedSpace* owner);
469};
470
471
472// -----------------------------------------------------------------------------
473// Interface for heap object iterator to be implemented by all object space
474// object iterators.
475//
476// NOTE: The space specific object iterators also implements the own has_next()
477// and next() methods which are used to avoid using virtual functions
478// iterating a specific space.
479
480class ObjectIterator : public Malloced {
481 public:
482 virtual ~ObjectIterator() { }
483
484 virtual bool has_next_object() = 0;
485 virtual HeapObject* next_object() = 0;
486};
487
488
489// -----------------------------------------------------------------------------
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000490// Heap object iterator in new/old/map spaces.
491//
492// A HeapObjectIterator iterates objects from a given address to the
493// top of a space. The given address must be below the current
494// allocation pointer (space top). If the space top changes during
495// iteration (because of allocating new objects), the iterator does
496// not iterate new objects. The caller function must create a new
497// iterator starting from the old top in order to visit these new
498// objects. Heap::Scavenage() is such an example.
499
500class HeapObjectIterator: public ObjectIterator {
501 public:
502 // Creates a new object iterator in a given space. If a start
503 // address is not given, the iterator starts from the space bottom.
504 // If the size function is not given, the iterator calls the default
505 // Object::Size().
506 explicit HeapObjectIterator(PagedSpace* space);
507 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
508 HeapObjectIterator(PagedSpace* space, Address start);
509 HeapObjectIterator(PagedSpace* space,
510 Address start,
511 HeapObjectCallback size_func);
512
513 inline bool has_next();
514 inline HeapObject* next();
515
516 // implementation of ObjectIterator.
517 virtual bool has_next_object() { return has_next(); }
518 virtual HeapObject* next_object() { return next(); }
519
520 private:
521 Address cur_addr_; // current iteration point
522 Address end_addr_; // end iteration point
523 Address cur_limit_; // current page limit
524 HeapObjectCallback size_func_; // size function
525 Page* end_page_; // caches the page of the end address
526
527 // Slow path of has_next, checks whether there are more objects in
528 // the next page.
529 bool HasNextInNextPage();
530
531 // Initializes fields.
532 void Initialize(Address start, Address end, HeapObjectCallback size_func);
533
534#ifdef DEBUG
535 // Verifies whether fields have valid values.
536 void Verify();
537#endif
538};
539
540
541// -----------------------------------------------------------------------------
542// A PageIterator iterates pages in a space.
543//
544// The PageIterator class provides three modes for iterating pages in a space:
545// PAGES_IN_USE iterates pages that are in use by the allocator;
546// PAGES_USED_BY_GC iterates pages that hold relocated objects during a
547// mark-compact collection;
548// ALL_PAGES iterates all pages in the space.
549
550class PageIterator BASE_EMBEDDED {
551 public:
552 enum Mode {PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES};
553
554 PageIterator(PagedSpace* space, Mode mode);
555
556 inline bool has_next();
557 inline Page* next();
558
559 private:
560 Page* cur_page_; // next page to return
561 Page* stop_page_; // page where to stop
562};
563
564
565// -----------------------------------------------------------------------------
566// A space has a list of pages. The next page can be accessed via
567// Page::next_page() call. The next page of the last page is an
568// invalid page pointer. A space can expand and shrink dynamically.
569
570// An abstraction of allocation and relocation pointers in a page-structured
571// space.
kasper.lund7276f142008-07-30 08:49:36 +0000572class AllocationInfo {
573 public:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000574 Address top; // current allocation top
575 Address limit; // current allocation limit
kasper.lund7276f142008-07-30 08:49:36 +0000576
577#ifdef DEBUG
578 bool VerifyPagedAllocation() {
579 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
580 && (top <= limit);
581 }
582#endif
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000583};
584
585
586// An abstraction of the accounting statistics of a page-structured space.
587// The 'capacity' of a space is the number of object-area bytes (ie, not
588// including page bookkeeping structures) currently in the space. The 'size'
589// of a space is the number of allocated bytes, the 'waste' in the space is
590// the number of bytes that are not allocated and not available to
591// allocation without reorganizing the space via a GC (eg, small blocks due
592// to internal fragmentation, top of page areas in map space), and the bytes
593// 'available' is the number of unallocated bytes that are not waste. The
594// capacity is the sum of size, waste, and available.
595//
596// The stats are only set by functions that ensure they stay balanced. These
597// functions increase or decrease one of the non-capacity stats in
598// conjunction with capacity, or else they always balance increases and
599// decreases to the non-capacity stats.
600class AllocationStats BASE_EMBEDDED {
601 public:
602 AllocationStats() { Clear(); }
603
604 // Zero out all the allocation statistics (ie, no capacity).
605 void Clear() {
606 capacity_ = 0;
607 available_ = 0;
608 size_ = 0;
609 waste_ = 0;
610 }
611
612 // Reset the allocation statistics (ie, available = capacity with no
613 // wasted or allocated bytes).
614 void Reset() {
615 available_ = capacity_;
616 size_ = 0;
617 waste_ = 0;
618 }
619
620 // Accessors for the allocation statistics.
621 int Capacity() { return capacity_; }
622 int Available() { return available_; }
623 int Size() { return size_; }
624 int Waste() { return waste_; }
625
626 // Grow the space by adding available bytes.
627 void ExpandSpace(int size_in_bytes) {
628 capacity_ += size_in_bytes;
629 available_ += size_in_bytes;
630 }
631
632 // Shrink the space by removing available bytes.
633 void ShrinkSpace(int size_in_bytes) {
634 capacity_ -= size_in_bytes;
635 available_ -= size_in_bytes;
636 }
637
638 // Allocate from available bytes (available -> size).
639 void AllocateBytes(int size_in_bytes) {
640 available_ -= size_in_bytes;
641 size_ += size_in_bytes;
642 }
643
644 // Free allocated bytes, making them available (size -> available).
645 void DeallocateBytes(int size_in_bytes) {
646 size_ -= size_in_bytes;
647 available_ += size_in_bytes;
648 }
649
650 // Waste free bytes (available -> waste).
651 void WasteBytes(int size_in_bytes) {
652 available_ -= size_in_bytes;
653 waste_ += size_in_bytes;
654 }
655
656 // Consider the wasted bytes to be allocated, as they contain filler
657 // objects (waste -> size).
658 void FillWastedBytes(int size_in_bytes) {
659 waste_ -= size_in_bytes;
660 size_ += size_in_bytes;
661 }
662
663 private:
664 int capacity_;
665 int available_;
666 int size_;
667 int waste_;
668};
669
670
kasper.lund7276f142008-07-30 08:49:36 +0000671class PagedSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000672 friend class PageIterator;
673 public:
674 // Creates a space with a maximum capacity, and an id.
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000675 PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
kasper.lund7276f142008-07-30 08:49:36 +0000676
677 virtual ~PagedSpace() {}
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000678
679 // Set up the space using the given address range of virtual memory (from
680 // the memory allocator's initial chunk) if possible. If the block of
681 // addresses is not big enough to contain a single page-aligned page, a
682 // fresh chunk will be allocated.
683 bool Setup(Address start, size_t size);
684
685 // Returns true if the space has been successfully set up and not
686 // subsequently torn down.
687 bool HasBeenSetup();
688
689 // Cleans up the space, frees all pages in this space except those belonging
690 // to the initial chunk, uncommits addresses in the initial chunk.
691 void TearDown();
692
693 // Checks whether an object/address is in this space.
694 inline bool Contains(Address a);
695 bool Contains(HeapObject* o) { return Contains(o->address()); }
696
kasper.lund7276f142008-07-30 08:49:36 +0000697 // Given an address occupied by a live object, return that object if it is
698 // in this space, or Failure::Exception() if it is not. The implementation
699 // iterates over objects in the page containing the address, the cost is
700 // linear in the number of objects in the page. It may be slow.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000701 Object* FindObject(Address addr);
702
kasper.lund7276f142008-07-30 08:49:36 +0000703 // Checks whether page is currently in use by this space.
704 bool IsUsed(Page* page);
705
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000706 // Clears remembered sets of pages in this space.
707 void ClearRSet();
708
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000709 // Prepares for a mark-compact GC.
710 virtual void PrepareForMarkCompact(bool will_compact) = 0;
711
712 virtual Address PageAllocationTop(Page* page) = 0;
713
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000714 // Current capacity without growing (Size() + Available() + Waste()).
715 int Capacity() { return accounting_stats_.Capacity(); }
716
717 // Available bytes without growing.
718 int Available() { return accounting_stats_.Available(); }
719
720 // Allocated bytes in this space.
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000721 virtual int Size() { return accounting_stats_.Size(); }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000722
723 // Wasted bytes due to fragmentation and not recoverable until the
724 // next GC of this space.
725 int Waste() { return accounting_stats_.Waste(); }
726
727 // Returns the address of the first object in this space.
728 Address bottom() { return first_page_->ObjectAreaStart(); }
729
730 // Returns the allocation pointer in this space.
731 Address top() { return allocation_info_.top; }
732
kasper.lund7276f142008-07-30 08:49:36 +0000733 // Allocate the requested number of bytes in the space if possible, return a
734 // failure object if not.
735 inline Object* AllocateRaw(int size_in_bytes);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000736
kasper.lund7276f142008-07-30 08:49:36 +0000737 // Allocate the requested number of bytes for relocation during mark-compact
738 // collection.
739 inline Object* MCAllocateRaw(int size_in_bytes);
740
741
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000742 // ---------------------------------------------------------------------------
743 // Mark-compact collection support functions
744
745 // Set the relocation point to the beginning of the space.
746 void MCResetRelocationInfo();
747
  // Records the current mark-compact forwarding top in its page by storing
  // it in the page's mc_relocation_top field, so the page knows how far
  // relocated objects will fill it.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }
752
753 // Computes the offset of a given address in this space to the beginning
754 // of the space.
755 int MCSpaceOffsetForAddress(Address addr);
756
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000757 // Updates the allocation pointer to the relocation top after a mark-compact
758 // collection.
759 virtual void MCCommitRelocationInfo() = 0;
760
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000761 // Releases half of unused pages.
762 void Shrink();
763
764 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
765 bool EnsureCapacity(int capacity);
766
767#ifdef DEBUG
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000768 // Print meta info and objects in this space.
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000769 virtual void Print();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000770
771 // Report code object related statistics
772 void CollectCodeStatistics();
773 static void ReportCodeStatistics();
774 static void ResetCodeStatistics();
775#endif
776
777 protected:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000778 // Maximum capacity of this space.
779 int max_capacity_;
780
781 // Accounting information for this space.
782 AllocationStats accounting_stats_;
783
784 // The first page in this space.
785 Page* first_page_;
786
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000787 // Normal allocation information.
788 AllocationInfo allocation_info_;
789
790 // Relocation information during mark-compact collections.
791 AllocationInfo mc_forwarding_info_;
792
793 // Sets allocation pointer to a page bottom.
794 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
795
  // Returns the page containing the top of the given allocation info
  // structure, derived from the structure's allocation limit.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }
800
801 // Expands the space by allocating a fixed number of pages. Returns false if
802 // it cannot allocate requested number of pages from OS. Newly allocated
  // pages are appended to the last_page;
804 bool Expand(Page* last_page);
805
kasper.lund7276f142008-07-30 08:49:36 +0000806 // Generic fast case allocation function that tries linear allocation in
807 // the top page of 'alloc_info'. Returns NULL on failure.
808 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
809 int size_in_bytes);
810
811 // During normal allocation or deserialization, roll to the next page in
812 // the space (there is assumed to be one) and allocate there. This
813 // function is space-dependent.
814 virtual HeapObject* AllocateInNextPage(Page* current_page,
815 int size_in_bytes) = 0;
816
817 // Slow path of AllocateRaw. This function is space-dependent.
818 virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
819
820 // Slow path of MCAllocateRaw.
821 HeapObject* SlowMCAllocateRaw(int size_in_bytes);
822
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000823#ifdef DEBUG
824 void DoPrintRSet(const char* space_name);
825#endif
826 private:
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000827 // Returns the page of the allocation pointer.
828 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
829
830 // Returns a pointer to the page of the relocation pointer.
831 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
832
833#ifdef DEBUG
834 // Returns the number of total pages in this space.
835 int CountTotalPages();
836#endif
837};
838
839
840#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
841// HistogramInfo class for recording a single "bar" of a histogram. This
842// class is used for collecting statistics to print to stdout (when compiled
843// with DEBUG) or to the log file (when compiled with
844// ENABLE_LOGGING_AND_PROFILING).
845class HistogramInfo BASE_EMBEDDED {
846 public:
847 HistogramInfo() : number_(0), bytes_(0) {}
848
849 const char* name() { return name_; }
850 void set_name(const char* name) { name_ = name; }
851
852 int number() { return number_; }
853 void increment_number(int num) { number_ += num; }
854
855 int bytes() { return bytes_; }
856 void increment_bytes(int size) { bytes_ += size; }
857
858 // Clear the number of objects and size fields, but not the name.
859 void clear() {
860 number_ = 0;
861 bytes_ = 0;
862 }
863
864 private:
865 const char* name_;
866 int number_;
867 int bytes_;
868};
869#endif
870
871
872// -----------------------------------------------------------------------------
873// SemiSpace in young generation
874//
875// A semispace is a contiguous chunk of memory. The mark-compact collector
876// uses the memory in the from space as a marking stack when tracing live
877// objects.
878
kasper.lund7276f142008-07-30 08:49:36 +0000879class SemiSpace : public Space {
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000880 public:
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000881 // Constructor.
882 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
883 start_ = NULL;
884 age_mark_ = NULL;
885 }
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000886
887 // Sets up the semispace using the given chunk.
kasperl@chromium.org5a8ca6c2008-10-23 13:57:19 +0000888 bool Setup(Address start, int initial_capacity, int maximum_capacity);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000889
890 // Tear down the space. Heap memory was not allocated by the space, so it
891 // is not deallocated here.
892 void TearDown();
893
894 // True if the space has been set up but not torn down.
895 bool HasBeenSetup() { return start_ != NULL; }
896
897 // Double the size of the semispace by committing extra virtual memory.
898 // Assumes that the caller has checked that the semispace has not reached
899 // its maxmimum capacity (and thus there is space available in the reserved
900 // address range to grow).
901 bool Double();
902
903 // Returns the start address of the space.
904 Address low() { return start_; }
905 // Returns one past the end address of the space.
906 Address high() { return low() + capacity_; }
907
908 // Age mark accessors.
909 Address age_mark() { return age_mark_; }
910 void set_age_mark(Address mark) { age_mark_ = mark; }
911
912 // True if the address is in the address range of this semispace (not
913 // necessarily below the allocation pointer).
914 bool Contains(Address a) {
915 return (reinterpret_cast<uint32_t>(a) & address_mask_)
916 == reinterpret_cast<uint32_t>(start_);
917 }
918
919 // True if the object is a heap object in the address range of this
920 // semispace (not necessarily below the allocation pointer).
921 bool Contains(Object* o) {
922 return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
923 }
924
925 // The offset of an address from the begining of the space.
926 int SpaceOffsetForAddress(Address addr) { return addr - low(); }
927
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000928 // If we don't have this here then SemiSpace will be abstract. However
929 // it should never be called.
930 virtual int Size() {
931 UNREACHABLE();
932 return 0;
933 }
934
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000935#ifdef DEBUG
ager@chromium.org9258b6b2008-09-11 09:11:10 +0000936 virtual void Print();
937 virtual void Verify();
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +0000938#endif
939
940 private:
941 // The current and maximum capacity of the space.
942 int capacity_;
943 int maximum_capacity_;
944
945 // The start address of the space.
946 Address start_;
947 // Used to govern object promotion during mark-compact collection.
948 Address age_mark_;
949
950 // Masks and comparison values to test for containment in this semispace.
951 uint32_t address_mask_;
952 uint32_t object_mask_;
953 uint32_t object_expected_;
954
955 public:
956 TRACK_MEMORY("SemiSpace")
957};
958
959
960// A SemiSpaceIterator is an ObjectIterator that iterates over the active
961// semispace of the heap's new space. It iterates over the objects in the
962// semispace from a given start address (defaulting to the bottom of the
963// semispace) to the top of the semispace. New objects allocated after the
964// iterator is created are not iterated.
965class SemiSpaceIterator : public ObjectIterator {
966 public:
967 // Create an iterator over the objects in the given space. If no start
968 // address is given, the iterator starts from the bottom of the space. If
969 // no size function is given, the iterator calls Object::Size().
970 explicit SemiSpaceIterator(NewSpace* space);
971 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
972 SemiSpaceIterator(NewSpace* space, Address start);
973
974 bool has_next() {return current_ < limit_; }
975
976 HeapObject* next() {
977 ASSERT(has_next());
978
979 HeapObject* object = HeapObject::FromAddress(current_);
980 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
981 ASSERT_OBJECT_SIZE(size);
982
983 current_ += size;
984 return object;
985 }
986
987 // Implementation of the ObjectIterator functions.
988 virtual bool has_next_object() { return has_next(); }
989 virtual HeapObject* next_object() { return next(); }
990
991 private:
992 void Initialize(NewSpace* space, Address start, Address end,
993 HeapObjectCallback size_func);
994
995 // The semispace.
996 SemiSpace* space_;
997 // The current iteration point.
998 Address current_;
999 // The end of iteration.
1000 Address limit_;
1001 // The callback function.
1002 HeapObjectCallback size_func_;
1003};
1004
1005
1006// -----------------------------------------------------------------------------
1007// The young generation space.
1008//
1009// The new space consists of a contiguous pair of semispaces. It simply
1010// forwards most functions to the appropriate semispace.
1011
class NewSpace : public Space {
 public:
  // Constructor.  The space is unusable until Setup() is called.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space.  Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flip the pair of spaces (swap the roles of to-space and from-space).
  void Flip();

  // Doubles the capacity of the semispaces.  Assumes that they are not at
  // their maximum capacity.  Returns a flag indicating success or failure.
  bool Double();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uint32_t>(a) & address_mask_)
        == reinterpret_cast<uint32_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uint32_t>(o) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual int Size() { return top() - bottom(); }
  // Return the current capacity of a semispace.
  int Capacity() { return capacity_; }
  // Return the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Return the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask.  Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uint32_t mask() { return address_mask_; }

  // The allocation top and limit addresses (exposed as pointers so that
  // generated code can update them in place).
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  // Allocate the requested number of bytes in the active semispace; returns
  // a failure object if the space is exhausted.
  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocate the requested number of bytes for relocation during mark-compact
  // collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Update the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Get the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Get the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object.  Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

 private:
  // The current and maximum capacities of a semispace.
  int capacity_;
  int maximum_capacity_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uint32_t address_mask_;
  uint32_t object_mask_;
  uint32_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Histograms filled in by CollectStatistics/RecordAllocation/RecordPromotion.
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
1176
1177
1178// -----------------------------------------------------------------------------
1179// Free lists for old object spaces
1180//
1181// Free-list nodes are free blocks in the heap. They look like heap objects
1182// (free-list node pointers have the heap object tag, and they have a map like
1183// a heap object). They have a size and a next pointer. The next pointer is
1184// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address.  This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  // Set the size in bytes, which can be read with HeapObject::Size().  This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field, which holds the raw address of the next
  // free-list node (or NULL at the end of the list).
  inline Address next();
  inline void set_next(Address next);

 private:
  // Byte offset of the next field within the node.
  static const int kNextOffset = Array::kHeaderSize;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
1209
1210
1211// The free list for the old space.
1212class OldSpaceFreeList BASE_EMBEDDED {
1213 public:
1214 explicit OldSpaceFreeList(AllocationSpace owner);
1215
1216 // Clear the free list.
1217 void Reset();
1218
1219 // Return the number of bytes available on the free list.
1220 int available() { return available_; }
1221
1222 // Place a node on the free list. The block of size 'size_in_bytes'
1223 // starting at 'start' is placed on the free list. The return value is the
1224 // number of bytes that have been lost due to internal fragmentation by
1225 // freeing the block. Bookkeeping information will be written to the block,
1226 // ie, its contents will be destroyed. The start address should be word
1227 // aligned, and the size should be a non-zero multiple of the word size.
1228 int Free(Address start, int size_in_bytes);
1229
1230 // Allocate a block of size 'size_in_bytes' from the free list. The block
1231 // is unitialized. A failure is returned if no block is available. The
1232 // number of bytes lost to fragmentation is returned in the output parameter
1233 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1234 Object* Allocate(int size_in_bytes, int* wasted_bytes);
1235
1236 private:
1237 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1238 // will always result in waste.)
1239 static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
1240 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1241
1242 // The identity of the owning space, for building allocation Failure
1243 // objects.
1244 AllocationSpace owner_;
1245
1246 // Total available bytes in all blocks on this free list.
1247 int available_;
1248
1249 // Blocks are put on exact free lists in an array, indexed by size in words.
1250 // The available sizes are kept in an increasingly ordered list. Entries
1251 // corresponding to sizes < kMinBlockSize always have an empty free list
1252 // (but index kHead is used for the head of the size list).
1253 struct SizeNode {
1254 // Address of the head FreeListNode of the implied block size or NULL.
1255 Address head_node_;
1256 // Size (words) of the next larger available size if head_node_ != NULL.
1257 int next_size_;
1258 };
1259 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1260 SizeNode free_[kFreeListsLength];
1261
1262 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1263 static const int kHead = kMinBlockSize / kPointerSize - 1;
1264 static const int kEnd = kMaxInt;
1265
1266 // We keep a "finger" in the size list to speed up a common pattern:
1267 // repeated requests for the same or increasing sizes.
1268 int finger_;
1269
1270 // Starting from *prev, find and return the smallest size >= index (words),
1271 // or kEnd. Update *prev to be the largest size < index, or kHead.
1272 int FindSize(int index, int* prev) {
1273 int cur = free_[*prev].next_size_;
1274 while (cur < index) {
1275 *prev = cur;
1276 cur = free_[cur].next_size_;
1277 }
1278 return cur;
1279 }
1280
1281 // Remove an existing element from the size list.
1282 void RemoveSize(int index) {
1283 int prev = kHead;
1284 int cur = FindSize(index, &prev);
1285 ASSERT(cur == index);
1286 free_[prev].next_size_ = free_[cur].next_size_;
1287 finger_ = prev;
1288 }
1289
1290 // Insert a new element into the size list.
1291 void InsertSize(int index) {
1292 int prev = kHead;
1293 int cur = FindSize(index, &prev);
1294 ASSERT(cur != index);
1295 free_[prev].next_size_ = index;
1296 free_[index].next_size_ = cur;
1297 }
1298
1299 // The size list is not updated during a sequence of calls to Free, but is
1300 // rebuilt before the next allocation.
1301 void RebuildSizeList();
1302 bool needs_rebuild_;
1303
kasper.lund7276f142008-07-30 08:49:36 +00001304#ifdef DEBUG
1305 // Does this free list contain a free block located at the address of 'node'?
1306 bool Contains(FreeListNode* node);
1307#endif
1308
mads.s.ager@gmail.com9a4089a2008-09-01 08:55:01 +00001309 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001310};
1311
1312
1313// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
 public:
  explicit MapSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list.  The block starting at 'start' (assumed to
  // have size Map::kSize) is placed on the free list.  Bookkeeping
  // information will be written to the block, ie, its contents will be
  // destroyed.  The start address should be word aligned.
  void Free(Address start);

  // Allocate a map-sized block from the free list.  The block is unitialized.
  // A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list (raw address of the first free block, or NULL).
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
};
1347
1348
1349// -----------------------------------------------------------------------------
1350// Old object space (excluding map objects)
1351
class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
  }

  // The bytes available on the free list (ie, not above the linear allocation
  // pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space.  Undefined if page is
  // unused.  The top page ends at the current allocation pointer; all other
  // pages are considered full to the end of their object area.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.
  void Free(Address start, int size_in_bytes) {
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }

  // Prepare for full garbage collection.  Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Adjust the top of relocation pointer to point to the end of the object
  // given by 'address' and 'size_in_bytes'.  Move it to the next page if
  // necessary, ensure that it points to the address, then increment it by the
  // size.
  void MCAdjustRelocationEnd(Address address, int size_in_bytes);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistics for the space
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

  // During relocation, we keep a pointer to the most recently relocated
  // object in order to know when to move to the next page.
  Address mc_end_of_relocation_;

 public:
  TRACK_MEMORY("OldSpace")
};
1422
1423
1424// -----------------------------------------------------------------------------
1425// Old space for all map objects
1426
class MapSpace : public PagedSpace {
 public:
  // Creates a map space object with a maximum capacity.  Map space is never
  // executable.
  explicit MapSpace(int max_capacity, AllocationSpace id)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }

  // The top of allocation in a page in this space.  Undefined if page is
  // unused.  Non-top pages end kPageExtra bytes before the object area end,
  // because the object area is only used in whole Map::kSize units.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : page->ObjectAreaEnd() - kPageExtra;
  }

  // Give a map-sized block of memory to the space's free list.
  void Free(Address start) {
    free_list_.Free(start);
    accounting_stats_.DeallocateBytes(Map::kSize);
  }

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a mark-compact
  // collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistic info of the space
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Constants.
  static const int kMapPageIndexBits = 10;
  static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;

  // Bytes at the end of a page's object area that are too small to hold a map.
  static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;

 protected:
  // Virtual function in the superclass.  Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass.  Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  MapSpaceFreeList free_list_;

  // An array of page start address in a map space.
  Address page_addresses_[kMaxMapPageIndex];

 public:
  TRACK_MEMORY("MapSpace")
};
1489
1490
1491// -----------------------------------------------------------------------------
1492// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
1493// the large object space. A large object is allocated from OS heap with
1494// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
1495// A large object always starts at Page::kObjectStartOffset to a page.
1496// Large objects do not move during garbage collections.
christian.plesner.hansen43d26ec2008-07-03 15:10:15 +00001497
1498// A LargeObjectChunk holds exactly one large object page with exactly one
1499// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk that contains a large object page
  // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
  // object and possibly extra remembered set words) bytes after the object
  // area start of that page.  The allocated chunk size is returned in the
  // output parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.  Defined in the .cc file (depends on
  // layout constants not visible here).
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words).  Used by
  // LargeObjectSpace::Available.  Note that this can overestimate the size
  // of object that will fit in a chunk -- if the object requires extra
  // remembered set words (e.g., for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    // A chunk no larger than the fixed per-chunk overhead holds no object.
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};
1554
1555
class LargeObjectSpace : public Space {
  friend class LargeObjectIterator;
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  // Total bytes allocated in this space.
  virtual int Size() {
    return size_;
  }

  // Number of chunks in this space; each chunk holds one large object page.
  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returns Failure::Exception()
  // if it is not found. The function iterates through all objects in this
  // space, may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty, i.e. holds no chunks.
  bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif
  // Checks whether an address is in the object area in this space.  It
  // iterates all objects in the space.  May be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;        // allocated bytes
  int page_count_;  // number of chunks


  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // required for extra_object_bytes of extra pointers (in bytes).
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
1641
1642
// Iterates over the large objects of a LargeObjectSpace, one chunk at a
// time (each chunk holds exactly one object).
class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  // True while there are more chunks (and thus objects) to visit.
  bool has_next() { return current_ != NULL; }
  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  // The chunk holding the next object to return, or NULL when exhausted.
  LargeObjectChunk* current_;
  // Size callback supplied by the two-argument constructor; semantics are
  // defined by the out-of-line next() -- see spaces.cc.
  HeapObjectCallback size_func_;
};
1659
1660
1661} } // namespace v8::internal
1662
1663#endif // V8_SPACES_H_