// Copyright 2006-2008 the V8 project authors. All rights reserved.
2// Redistribution and use in source and binary forms, with or without
3// modification, are permitted provided that the following conditions are
4// met:
5//
6// * Redistributions of source code must retain the above copyright
7// notice, this list of conditions and the following disclaimer.
8// * Redistributions in binary form must reproduce the above
9// copyright notice, this list of conditions and the following
10// disclaimer in the documentation and/or other materials provided
11// with the distribution.
12// * Neither the name of Google Inc. nor the names of its
13// contributors may be used to endorse or promote products derived
14// from this software without specific prior written permission.
15//
16// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28#ifndef V8_SPACES_H_
29#define V8_SPACES_H_
30
31#include "list-inl.h"
32#include "log.h"
33
34namespace v8 {
35namespace internal {
36
// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header, a remembered
// set area, and an object area. The page size is deliberately chosen as 8K
// bytes. The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. The next 248 bytes are
// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
// remembered set bit corresponds to a pointer in the object area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It first reuses a few words of the remembered set for
// bookkeeping relocation information.
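//
// A rough sketch of the layout described above (illustrative only):
//
//   young generation   : to-space | from-space     (contiguous semispaces)
//   old space          : page -> page -> ...       (other old objects)
//   map space          : page -> page -> ...       (only map objects)
//   large object space : one chunk per large object, never moved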
64
65
66// Some assertion macros used in the debugging mode.
67
68#define ASSERT_PAGE_ALIGNED(address) \
69 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
70
71#define ASSERT_OBJECT_ALIGNED(address) \
72 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
73
74#define ASSERT_OBJECT_SIZE(size) \
75 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))
76
77#define ASSERT_PAGE_OFFSET(offset) \
78 ASSERT((Page::kObjectStartOffset <= offset) \
79 && (offset <= Page::kPageSize))
80
81#define ASSERT_MAP_PAGE_INDEX(index) \
82 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
83
84
85class PagedSpace;
86class MemoryAllocator;
87class AllocationInfo;
88
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size. A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as the remembered set, and the rest of the page is the
// object area.
//
// Pointers are aligned to the pointer size (4 bytes), so only 1 bit is needed
// per pointer in the remembered set. Given an address, the byte of the
// remembered set holding its bit (as an offset from the start of the page) is
// found by dividing its page offset by 32. Therefore, the object area in a
// page starts at the 256th byte (8K/32). Bytes 0 to 255 do not need
// remembered set bits, so the first two words (64 bits) in a page can be used
// for other purposes.
//
// On the 64-bit platform, pointers are aligned to the 8-byte pointer size, so
// only 128 bytes are needed for the RSet, and the part of the RSet that would
// cover the RSet itself frees up only two bytes. For this reason we add an
// offset to the start of the remembered set to get room for the Page data at
// the start of the page.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The map space can have up to 1024 pages, and 8M bytes (1024 *
// 8K) in total. Because a map pointer is aligned to the pointer size (4
// bytes), 11 bits are enough to encode the page offset. 21 bits (10 for the
// page index + 11 for the offset in the page) are required to encode a map
// pointer.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
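//
// A worked example of the 32-bit layout above (illustrative only): a heap
// slot at page offset 4096 corresponds to remembered-set bit 4096 / 4 = 1024,
// which lives in byte 4096 / 32 = 128 of the page; covering offsets 0..8K
// takes 8K / 32 = 256 bytes in total, which is exactly where the object area
// starts (kObjectStartOffset).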
118class Page {
119 public:
120 // Returns the page containing a given address. The address ranges
121 // from [page_addr .. page_addr + kPageSize[
122 //
123 // Note that this function only works for addresses in normal paged
124 // spaces and addresses in the first 8K of large object pages (i.e.,
125 // the start of large objects but not necessarily derived pointers
126 // within them).
127 INLINE(static Page* FromAddress(Address a)) {
128 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
129 }
130
  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
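  // Illustrative example (an assumption, not part of the original comment):
  // if top == page_addr + kPageSize, then FromAddress(top) would resolve to
  // the *next* page, which is why FromAllocationTop() subtracts kPointerSize
  // before calling FromAddress().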
135 INLINE(static Page* FromAllocationTop(Address top)) {
136 Page* p = FromAddress(top - kPointerSize);
137 ASSERT_PAGE_OFFSET(p->Offset(top));
138 return p;
139 }
140
141 // Returns the start address of this page.
142 Address address() { return reinterpret_cast<Address>(this); }
143
144 // Checks whether this is a valid page address.
145 bool is_valid() { return address() != NULL; }
146
147 // Returns the next page of this page.
148 inline Page* next_page();
149
150 // Return the end of allocation in this page. Undefined for unused pages.
151 inline Address AllocationTop();
152
153 // Returns the start address of the object area in this page.
154 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
155
156 // Returns the end address (exclusive) of the object area in this page.
157 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
158
159 // Returns the start address of the remembered set area.
160 Address RSetStart() { return address() + kRSetStartOffset; }
161
162 // Returns the end address of the remembered set area (exclusive).
163 Address RSetEnd() { return address() + kRSetEndOffset; }
164
165 // Checks whether an address is page aligned.
166 static bool IsAlignedToPageSize(Address a) {
167 return 0 == (OffsetFrom(a) & kPageAlignmentMask);
168 }
169
170 // True if this page is a large object page.
171 bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }
172
173 // Returns the offset of a given address to this page.
174 INLINE(int Offset(Address a)) {
175 int offset = a - address();
176 ASSERT_PAGE_OFFSET(offset);
177 return offset;
178 }
179
  // Returns the address for a given offset into this page.
181 Address OffsetToAddress(int offset) {
182 ASSERT_PAGE_OFFSET(offset);
183 return address() + offset;
184 }
185
186 // ---------------------------------------------------------------------
187 // Remembered set support
188
189 // Clears remembered set in this page.
190 inline void ClearRSet();
191
192 // Return the address of the remembered set word corresponding to an
193 // object address/offset pair, and the bit encoded as a single-bit
194 // mask in the output parameter 'bitmask'.
195 INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
196 uint32_t* bitmask));
197
198 // Sets the corresponding remembered set bit for a given address.
199 INLINE(static void SetRSet(Address address, int offset));
200
201 // Clears the corresponding remembered set bit for a given address.
202 static inline void UnsetRSet(Address address, int offset);
203
204 // Checks whether the remembered set bit for a given address is set.
205 static inline bool IsRSetSet(Address address, int offset);
206
207#ifdef DEBUG
208 // Use a state to mark whether remembered set space can be used for other
209 // purposes.
210 enum RSetState { IN_USE, NOT_IN_USE };
211 static bool is_rset_in_use() { return rset_state_ == IN_USE; }
212 static void set_rset_state(RSetState state) { rset_state_ = state; }
213#endif
214
215 // 8K bytes per page.
216 static const int kPageSizeBits = 13;
217
218 // Page size in bytes. This must be a multiple of the OS page size.
219 static const int kPageSize = 1 << kPageSizeBits;
220
221 // Page size mask.
222 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
223
224 // The offset of the remembered set in a page, in addition to the empty bytes
225 // formed as the remembered bits of the remembered set itself.
226#ifdef V8_TARGET_ARCH_X64
227 static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
228#else
229 static const int kRSetOffset = 0;
230#endif
231 // The end offset of the remembered set in a page
232 // (heaps are aligned to pointer size).
233 static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
234
235 // The start offset of the object area in a page.
236 // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
237 // to align start of rset to a uint32_t address.
238 static const int kObjectStartOffset = 256;
239
240 // The start offset of the used part of the remembered set in a page.
241 static const int kRSetStartOffset = kRSetOffset +
242 kObjectStartOffset / kBitsPerPointer;
243
244 // Object area size in bytes.
245 static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
246
247 // Maximum object size that fits in a page.
248 static const int kMaxHeapObjectSize = kObjectAreaSize;
249
250 //---------------------------------------------------------------------------
251 // Page header description.
252 //
253 // If a page is not in the large object space, the first word,
254 // opaque_header, encodes the next page address (aligned to kPageSize 8K)
255 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
256 // opaque_header. The value range of the opaque_header is [0..kPageSize[,
257 // or [next_page_start, next_page_end[. It cannot point to a valid address
258 // in the current page. If a page is in the large object space, the first
259 // word *may* (if the page start and large object chunk start are the
260 // same) contain the address of the next large object chunk.
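  // For example (an illustrative reading of the encoding above, not original
  // text): the low kPageSizeBits bits of opaque_header hold the owning chunk
  // id, and the remaining high bits hold the page-aligned address of the next
  // page, so Page::next_page() masks the low bits off while
  // MemoryAllocator::GetChunkId() masks everything else off.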
261 intptr_t opaque_header;
262
263 // If the page is not in the large object space, the low-order bit of the
264 // second word is set. If the page is in the large object space, the
265 // second word *may* (if the page start and large object chunk start are
266 // the same) contain the large object chunk size. In either case, the
267 // low-order bit for large object pages will be cleared.
268 int is_normal_page;
269
270 // The following fields may overlap with remembered set, they can only
271 // be used in the mark-compact collector when remembered set is not
272 // used.
273
274 // The index of the page in its owner space.
275 int mc_page_index;
276
277 // The allocation pointer after relocating objects to this page.
278 Address mc_relocation_top;
279
280 // The forwarding address of the first live object in this page.
281 Address mc_first_forwarded;
282
283#ifdef DEBUG
284 private:
285 static RSetState rset_state_; // state of the remembered set
286#endif
287};
288
289
290// ----------------------------------------------------------------------------
291// Space is the abstract superclass for all allocation spaces.
292class Space : public Malloced {
293 public:
294 Space(AllocationSpace id, Executability executable)
295 : id_(id), executable_(executable) {}
296
297 virtual ~Space() {}
298
299 // Does the space need executable memory?
300 Executability executable() { return executable_; }
301
302 // Identity used in error reporting.
303 AllocationSpace identity() { return id_; }
304
305 virtual int Size() = 0;
306
307#ifdef DEBUG
308 virtual void Print() = 0;
309#endif
310
311 private:
312 AllocationSpace id_;
313 Executability executable_;
314};
315
316
317// ----------------------------------------------------------------------------
318// All heap objects containing executable code (code objects) must be allocated
319// from a 2 GB range of memory, so that they can call each other using 32-bit
320// displacements. This happens automatically on 32-bit platforms, where 32-bit
321// displacements cover the entire 4GB virtual address space. On 64-bit
322// platforms, we support this using the CodeRange object, which reserves and
323// manages a range of virtual memory.
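// Illustrative use (a sketch, not prescribed by this header): on a 64-bit
// target the heap calls CodeRange::Setup(requested_size) once at start-up;
// executable chunks are then carved out of that single reservation with
// CodeRange::AllocateRawMemory(), which keeps all code objects within a
// 32-bit displacement of each other.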
324class CodeRange : public AllStatic {
325 public:
326 // Reserves a range of virtual memory, but does not commit any of it.
327 // Can only be called once, at heap initialization time.
328 // Returns false on failure.
329 static bool Setup(const size_t requested_size);
330
331 // Frees the range of virtual memory, and frees the data structures used to
332 // manage it.
333 static void TearDown();
334
335 static bool exists() { return code_range_ != NULL; }
336 static bool contains(Address address) {
337 if (code_range_ == NULL) return false;
338 Address start = static_cast<Address>(code_range_->address());
339 return start <= address && address < start + code_range_->size();
340 }
341
342 // Allocates a chunk of memory from the large-object portion of
343 // the code range. On platforms with no separate code range, should
344 // not be called.
345 static void* AllocateRawMemory(const size_t requested, size_t* allocated);
346 static void FreeRawMemory(void* buf, size_t length);
347
348 private:
349 // The reserved range of virtual memory that all code objects are put in.
350 static VirtualMemory* code_range_;
351 // Plain old data class, just a struct plus a constructor.
352 class FreeBlock {
353 public:
354 FreeBlock(Address start_arg, size_t size_arg)
355 : start(start_arg), size(size_arg) {}
356 FreeBlock(void* start_arg, size_t size_arg)
357 : start(static_cast<Address>(start_arg)), size(size_arg) {}
358
359 Address start;
360 size_t size;
361 };
362
363 // Freed blocks of memory are added to the free list. When the allocation
364 // list is exhausted, the free list is sorted and merged to make the new
365 // allocation list.
366 static List<FreeBlock> free_list_;
367 // Memory is allocated from the free blocks on the allocation list.
368 // The block at current_allocation_block_index_ is the current block.
369 static List<FreeBlock> allocation_list_;
370 static int current_allocation_block_index_;
371
372 // Finds a block on the allocation list that contains at least the
373 // requested amount of memory. If none is found, sorts and merges
374 // the existing free memory blocks, and searches again.
375 // If none can be found, terminates V8 with FatalProcessOutOfMemory.
376 static void GetNextAllocationBlock(size_t requested);
377 // Compares the start addresses of two free blocks.
378 static int CompareFreeBlockAddress(const FreeBlock* left,
379 const FreeBlock* right);
380};
381
382
383// ----------------------------------------------------------------------------
384// A space acquires chunks of memory from the operating system. The memory
385// allocator manages chunks for the paged heap spaces (old space and map
386// space). A paged chunk consists of pages. Pages in a chunk have contiguous
387// addresses and are linked as a list.
388//
389// The allocator keeps an initial chunk which is used for the new space. The
390// leftover regions of the initial chunk are used for the initial chunks of
391// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// is expanded by allocating kPagesPerChunk pages per expansion, except
// possibly the last expansion before running out of space. The first chunk
// may contain fewer than kPagesPerChunk pages as well.
396//
397// The memory allocator also allocates chunks for the large object space, but
398// they are managed by the space itself. The new space does not expand.
399
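// A typical start-up sequence (an illustrative sketch only, where start, size
// and owner denote the page-aligned sub-range handed to one paged space):
//
//   MemoryAllocator::Setup(max_capacity);
//   void* chunk = MemoryAllocator::ReserveInitialChunk(requested);
//   int pages;
//   Page* first = MemoryAllocator::CommitPages(start, size, owner, &pages);
//
// followed by AllocatePages() whenever a paged space needs to grow beyond the
// initial chunk.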
400class MemoryAllocator : public AllStatic {
401 public:
402 // Initializes its internal bookkeeping structures.
403 // Max capacity of the total space.
404 static bool Setup(int max_capacity);
405
406 // Deletes valid chunks.
407 static void TearDown();
408
409 // Reserves an initial address range of virtual memory to be split between
410 // the two new space semispaces, the old space, and the map space. The
411 // memory is not yet committed or assigned to spaces and split into pages.
412 // The initial chunk is unmapped when the memory allocator is torn down.
413 // This function should only be called when there is not already a reserved
414 // initial chunk (initial_chunk_ should be NULL). It returns the start
415 // address of the initial chunk if successful, with the side effect of
416 // setting the initial chunk, or else NULL if unsuccessful and leaves the
417 // initial chunk NULL.
418 static void* ReserveInitialChunk(const size_t requested);
419
420 // Commits pages from an as-yet-unmanaged block of virtual memory into a
421 // paged space. The block should be part of the initial chunk reserved via
422 // a call to ReserveInitialChunk. The number of pages is always returned in
423 // the output parameter num_pages. This function assumes that the start
424 // address is non-null and that it is big enough to hold at least one
425 // page-aligned page. The call always succeeds, and num_pages is always
426 // greater than zero.
427 static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
428 int* num_pages);
429
430 // Commit a contiguous block of memory from the initial chunk. Assumes that
431 // the address is not NULL, the size is greater than zero, and that the
432 // block is contained in the initial chunk. Returns true if it succeeded
433 // and false otherwise.
434 static bool CommitBlock(Address start, size_t size, Executability executable);
435
436
437 // Uncommit a contiguous block of memory [start..(start+size)[.
438 // start is not NULL, the size is greater than zero, and the
439 // block is contained in the initial chunk. Returns true if it succeeded
440 // and false otherwise.
441 static bool UncommitBlock(Address start, size_t size);
442
443 // Attempts to allocate the requested (non-zero) number of pages from the
444 // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
446 // function returns an invalid page pointer (NULL). The caller must check
447 // whether the returned page is valid (by calling Page::is_valid()). It is
448 // guaranteed that allocated pages have contiguous addresses. The actual
449 // number of allocated pages is returned in the output parameter
450 // allocated_pages. If the PagedSpace owner is executable and there is
451 // a code range, the pages are allocated from the code range.
452 static Page* AllocatePages(int requested_pages, int* allocated_pages,
453 PagedSpace* owner);
454
455 // Frees pages from a given page and after. If 'p' is the first page
456 // of a chunk, pages from 'p' are freed and this function returns an
457 // invalid page pointer. Otherwise, the function searches a page
458 // after 'p' that is the first page of a chunk. Pages after the
459 // found page are freed and the function returns 'p'.
460 static Page* FreePages(Page* p);
461
462 // Allocates and frees raw memory of certain size.
463 // These are just thin wrappers around OS::Allocate and OS::Free,
464 // but keep track of allocated bytes as part of heap.
465 // If the flag is EXECUTABLE and a code range exists, the requested
466 // memory is allocated from the code range. If a code range exists
467 // and the freed memory is in it, the code range manages the freed memory.
468 static void* AllocateRawMemory(const size_t requested,
469 size_t* allocated,
470 Executability executable);
471 static void FreeRawMemory(void* buf, size_t length);
472
473 // Returns the maximum available bytes of heaps.
474 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
475
476 // Returns allocated spaces in bytes.
477 static int Size() { return size_; }
478
479 // Returns maximum available bytes that the old space can have.
480 static int MaxAvailable() {
481 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
482 }
483
484 // Links two pages.
485 static inline void SetNextPage(Page* prev, Page* next);
486
487 // Returns the next page of a given page.
488 static inline Page* GetNextPage(Page* p);
489
490 // Checks whether a page belongs to a space.
491 static inline bool IsPageInSpace(Page* p, PagedSpace* space);
492
493 // Returns the space that owns the given page.
494 static inline PagedSpace* PageOwner(Page* page);
495
496 // Finds the first/last page in the same chunk as a given page.
497 static Page* FindFirstPageInSameChunk(Page* p);
498 static Page* FindLastPageInSameChunk(Page* p);
499
500#ifdef ENABLE_HEAP_PROTECTION
501 // Protect/unprotect a block of memory by marking it read-only/writable.
502 static inline void Protect(Address start, size_t size);
503 static inline void Unprotect(Address start, size_t size,
504 Executability executable);
505
506 // Protect/unprotect a chunk given a page in the chunk.
507 static inline void ProtectChunkFromPage(Page* page);
508 static inline void UnprotectChunkFromPage(Page* page);
509#endif
510
511#ifdef DEBUG
512 // Reports statistic info of the space.
513 static void ReportStatistics();
514#endif
515
  // Due to an encoding limitation, we can only have 8K chunks.
517 static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
518 // If a chunk has at least 16 pages, the maximum heap size is about
519 // 8K * 8K * 16 = 1G bytes.
520#ifdef V8_TARGET_ARCH_X64
521 static const int kPagesPerChunk = 32;
522#else
523 static const int kPagesPerChunk = 16;
524#endif
525 static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
526
527 private:
528 // Maximum space size in bytes.
529 static int capacity_;
530
531 // Allocated space size in bytes.
532 static int size_;
533
534 // The initial chunk of virtual memory.
535 static VirtualMemory* initial_chunk_;
536
537 // Allocated chunk info: chunk start address, chunk size, and owning space.
538 class ChunkInfo BASE_EMBEDDED {
539 public:
540 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
541 void init(Address a, size_t s, PagedSpace* o) {
542 address_ = a;
543 size_ = s;
544 owner_ = o;
545 }
546 Address address() { return address_; }
547 size_t size() { return size_; }
548 PagedSpace* owner() { return owner_; }
549
550 private:
551 Address address_;
552 size_t size_;
553 PagedSpace* owner_;
554 };
555
556 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
557 static List<ChunkInfo> chunks_;
558 static List<int> free_chunk_ids_;
559 static int max_nof_chunks_;
560 static int top_;
561
562 // Push/pop a free chunk id onto/from the stack.
563 static void Push(int free_chunk_id);
564 static int Pop();
565 static bool OutOfChunkIds() { return top_ == 0; }
566
567 // Frees a chunk.
568 static void DeleteChunk(int chunk_id);
569
570 // Basic check whether a chunk id is in the valid range.
571 static inline bool IsValidChunkId(int chunk_id);
572
573 // Checks whether a chunk id identifies an allocated chunk.
574 static inline bool IsValidChunk(int chunk_id);
575
576 // Returns the chunk id that a page belongs to.
577 static inline int GetChunkId(Page* p);
578
579 // True if the address lies in the initial chunk.
580 static inline bool InInitialChunk(Address address);
581
582 // Initializes pages in a chunk. Returns the first page address.
583 // This function and GetChunkId() are provided for the mark-compact
584 // collector to rebuild page headers in the from space, which is
585 // used as a marking stack and its page headers are destroyed.
586 static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
587 PagedSpace* owner);
588};
589
590
591// -----------------------------------------------------------------------------
592// Interface for heap object iterator to be implemented by all object space
593// object iterators.
594//
// NOTE: The space-specific object iterators also implement their own
// has_next() and next() methods, which are used to avoid virtual function
// calls when iterating a specific space.
598
599class ObjectIterator : public Malloced {
600 public:
601 virtual ~ObjectIterator() { }
602
603 virtual bool has_next_object() = 0;
604 virtual HeapObject* next_object() = 0;
605};
606
607
608// -----------------------------------------------------------------------------
609// Heap object iterator in new/old/map spaces.
610//
611// A HeapObjectIterator iterates objects from a given address to the
612// top of a space. The given address must be below the current
613// allocation pointer (space top). There are some caveats.
614//
615// (1) If the space top changes upward during iteration (because of
616// allocating new objects), the iterator does not iterate objects
617// above the original space top. The caller must create a new
618// iterator starting from the old top in order to visit these new
619// objects.
620//
621// (2) If new objects are allocated below the original allocation top
622// (e.g., free-list allocation in paged spaces), the new objects
623// may or may not be iterated depending on their position with
624// respect to the current point of iteration.
625//
626// (3) The space top should not change downward during iteration,
627// otherwise the iterator will return not-necessarily-valid
628// objects.
629
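// Typical usage (an illustrative sketch):
//
//   HeapObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* obj = it.next();
//     // ... visit obj ...
//   }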
630class HeapObjectIterator: public ObjectIterator {
631 public:
632 // Creates a new object iterator in a given space. If a start
633 // address is not given, the iterator starts from the space bottom.
634 // If the size function is not given, the iterator calls the default
635 // Object::Size().
636 explicit HeapObjectIterator(PagedSpace* space);
637 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
638 HeapObjectIterator(PagedSpace* space, Address start);
639 HeapObjectIterator(PagedSpace* space,
640 Address start,
641 HeapObjectCallback size_func);
642
643 inline bool has_next();
644 inline HeapObject* next();
645
646 // implementation of ObjectIterator.
647 virtual bool has_next_object() { return has_next(); }
648 virtual HeapObject* next_object() { return next(); }
649
650 private:
651 Address cur_addr_; // current iteration point
652 Address end_addr_; // end iteration point
653 Address cur_limit_; // current page limit
654 HeapObjectCallback size_func_; // size function
655 Page* end_page_; // caches the page of the end address
656
657 // Slow path of has_next, checks whether there are more objects in
658 // the next page.
659 bool HasNextInNextPage();
660
661 // Initializes fields.
662 void Initialize(Address start, Address end, HeapObjectCallback size_func);
663
664#ifdef DEBUG
665 // Verifies whether fields have valid values.
666 void Verify();
667#endif
668};
669
670
671// -----------------------------------------------------------------------------
672// A PageIterator iterates the pages in a paged space.
673//
674// The PageIterator class provides three modes for iterating pages in a space:
675// PAGES_IN_USE iterates pages containing allocated objects.
676// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
677// mark-compact collection.
678// ALL_PAGES iterates all pages in the space.
679//
680// There are some caveats.
681//
682// (1) If the space expands during iteration, new pages will not be
683// returned by the iterator in any mode.
684//
685// (2) If new objects are allocated during iteration, they will appear
686// in pages returned by the iterator. Allocation may cause the
687// allocation pointer or MC allocation pointer in the last page to
688// change between constructing the iterator and iterating the last
689// page.
690//
691// (3) The space should not shrink during iteration, otherwise the
692// iterator will return deallocated pages.
693
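// Typical usage (an illustrative sketch):
//
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }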
694class PageIterator BASE_EMBEDDED {
695 public:
696 enum Mode {
697 PAGES_IN_USE,
698 PAGES_USED_BY_MC,
699 ALL_PAGES
700 };
701
702 PageIterator(PagedSpace* space, Mode mode);
703
704 inline bool has_next();
705 inline Page* next();
706
707 private:
708 PagedSpace* space_;
709 Page* prev_page_; // Previous page returned.
710 Page* stop_page_; // Page to stop at (last page returned by the iterator).
711};
712
713
714// -----------------------------------------------------------------------------
715// A space has a list of pages. The next page can be accessed via
716// Page::next_page() call. The next page of the last page is an
717// invalid page pointer. A space can expand and shrink dynamically.
718
719// An abstraction of allocation and relocation pointers in a page-structured
720// space.
721class AllocationInfo {
722 public:
723 Address top; // current allocation top
724 Address limit; // current allocation limit
725
726#ifdef DEBUG
727 bool VerifyPagedAllocation() {
728 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
729 && (top <= limit);
730 }
731#endif
732};
733
734
735// An abstraction of the accounting statistics of a page-structured space.
736// The 'capacity' of a space is the number of object-area bytes (ie, not
737// including page bookkeeping structures) currently in the space. The 'size'
738// of a space is the number of allocated bytes, the 'waste' in the space is
739// the number of bytes that are not allocated and not available to
740// allocation without reorganizing the space via a GC (eg, small blocks due
741// to internal fragmentation, top of page areas in map space), and the bytes
742// 'available' is the number of unallocated bytes that are not waste. The
743// capacity is the sum of size, waste, and available.
744//
745// The stats are only set by functions that ensure they stay balanced. These
746// functions increase or decrease one of the non-capacity stats in
747// conjunction with capacity, or else they always balance increases and
748// decreases to the non-capacity stats.
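// A small worked example (illustrative only): ExpandSpace(8192) leaves
// capacity == available == 8192; AllocateBytes(100) then moves 100 bytes from
// available to size, and WasteBytes(24) moves 24 bytes from available to
// waste. After every such call the invariant
// capacity == size + waste + available still holds.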
749class AllocationStats BASE_EMBEDDED {
750 public:
751 AllocationStats() { Clear(); }
752
753 // Zero out all the allocation statistics (ie, no capacity).
754 void Clear() {
755 capacity_ = 0;
756 available_ = 0;
757 size_ = 0;
758 waste_ = 0;
759 }
760
761 // Reset the allocation statistics (ie, available = capacity with no
762 // wasted or allocated bytes).
763 void Reset() {
764 available_ = capacity_;
765 size_ = 0;
766 waste_ = 0;
767 }
768
769 // Accessors for the allocation statistics.
770 int Capacity() { return capacity_; }
771 int Available() { return available_; }
772 int Size() { return size_; }
773 int Waste() { return waste_; }
774
775 // Grow the space by adding available bytes.
776 void ExpandSpace(int size_in_bytes) {
777 capacity_ += size_in_bytes;
778 available_ += size_in_bytes;
779 }
780
781 // Shrink the space by removing available bytes.
782 void ShrinkSpace(int size_in_bytes) {
783 capacity_ -= size_in_bytes;
784 available_ -= size_in_bytes;
785 }
786
787 // Allocate from available bytes (available -> size).
788 void AllocateBytes(int size_in_bytes) {
789 available_ -= size_in_bytes;
790 size_ += size_in_bytes;
791 }
792
793 // Free allocated bytes, making them available (size -> available).
794 void DeallocateBytes(int size_in_bytes) {
795 size_ -= size_in_bytes;
796 available_ += size_in_bytes;
797 }
798
799 // Waste free bytes (available -> waste).
800 void WasteBytes(int size_in_bytes) {
801 available_ -= size_in_bytes;
802 waste_ += size_in_bytes;
803 }
804
805 // Consider the wasted bytes to be allocated, as they contain filler
806 // objects (waste -> size).
807 void FillWastedBytes(int size_in_bytes) {
808 waste_ -= size_in_bytes;
809 size_ += size_in_bytes;
810 }
811
812 private:
813 int capacity_;
814 int available_;
815 int size_;
816 int waste_;
817};
818
819
820class PagedSpace : public Space {
821 public:
822 // Creates a space with a maximum capacity, and an id.
823 PagedSpace(int max_capacity, AllocationSpace id, Executability executable);
824
825 virtual ~PagedSpace() {}
826
827 // Set up the space using the given address range of virtual memory (from
828 // the memory allocator's initial chunk) if possible. If the block of
829 // addresses is not big enough to contain a single page-aligned page, a
830 // fresh chunk will be allocated.
831 bool Setup(Address start, size_t size);
832
833 // Returns true if the space has been successfully set up and not
834 // subsequently torn down.
835 bool HasBeenSetup();
836
837 // Cleans up the space, frees all pages in this space except those belonging
838 // to the initial chunk, uncommits addresses in the initial chunk.
839 void TearDown();
840
841 // Checks whether an object/address is in this space.
842 inline bool Contains(Address a);
843 bool Contains(HeapObject* o) { return Contains(o->address()); }
844
845 // Given an address occupied by a live object, return that object if it is
846 // in this space, or Failure::Exception() if it is not. The implementation
847 // iterates over objects in the page containing the address, the cost is
848 // linear in the number of objects in the page. It may be slow.
849 Object* FindObject(Address addr);
850
851 // Checks whether page is currently in use by this space.
852 bool IsUsed(Page* page);
853
854 // Clears remembered sets of pages in this space.
855 void ClearRSet();
856
857 // Prepares for a mark-compact GC.
858 virtual void PrepareForMarkCompact(bool will_compact) = 0;
859
860 virtual Address PageAllocationTop(Page* page) = 0;
861
862 // Current capacity without growing (Size() + Available() + Waste()).
863 int Capacity() { return accounting_stats_.Capacity(); }
864
  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  int CommittedMemory() { return Capacity(); }
868
  // Available bytes without growing.
870 int Available() { return accounting_stats_.Available(); }
871
872 // Allocated bytes in this space.
873 virtual int Size() { return accounting_stats_.Size(); }
874
875 // Wasted bytes due to fragmentation and not recoverable until the
876 // next GC of this space.
877 int Waste() { return accounting_stats_.Waste(); }
878
879 // Returns the address of the first object in this space.
880 Address bottom() { return first_page_->ObjectAreaStart(); }
881
882 // Returns the allocation pointer in this space.
883 Address top() { return allocation_info_.top; }
884
885 // Allocate the requested number of bytes in the space if possible, return a
886 // failure object if not.
887 inline Object* AllocateRaw(int size_in_bytes);
888
889 // Allocate the requested number of bytes for relocation during mark-compact
890 // collection.
891 inline Object* MCAllocateRaw(int size_in_bytes);
892
893
894 // ---------------------------------------------------------------------------
895 // Mark-compact collection support functions
896
897 // Set the relocation point to the beginning of the space.
898 void MCResetRelocationInfo();
899
900 // Writes relocation info to the top page.
901 void MCWriteRelocationInfoToPage() {
902 TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
903 }
904
905 // Computes the offset of a given address in this space to the beginning
906 // of the space.
907 int MCSpaceOffsetForAddress(Address addr);
908
909 // Updates the allocation pointer to the relocation top after a mark-compact
910 // collection.
911 virtual void MCCommitRelocationInfo() = 0;
912
913 // Releases half of unused pages.
914 void Shrink();
915
916 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
917 bool EnsureCapacity(int capacity);
918
919#ifdef ENABLE_HEAP_PROTECTION
920 // Protect/unprotect the space by marking it read-only/writable.
921 void Protect();
922 void Unprotect();
923#endif
924
925#ifdef DEBUG
926 // Print meta info and objects in this space.
927 virtual void Print();
928
929 // Verify integrity of this space.
930 virtual void Verify(ObjectVisitor* visitor);
931
932 // Overridden by subclasses to verify space-specific object
933 // properties (e.g., only maps or free-list nodes are in map space).
934 virtual void VerifyObject(HeapObject* obj) {}
935
936 // Report code object related statistics
937 void CollectCodeStatistics();
938 static void ReportCodeStatistics();
939 static void ResetCodeStatistics();
940#endif
941
942 protected:
943 // Maximum capacity of this space.
944 int max_capacity_;
945
946 // Accounting information for this space.
947 AllocationStats accounting_stats_;
948
949 // The first page in this space.
950 Page* first_page_;
951
952 // The last page in this space. Initially set in Setup, updated in
953 // Expand and Shrink.
954 Page* last_page_;
955
956 // Normal allocation information.
957 AllocationInfo allocation_info_;
958
959 // Relocation information during mark-compact collections.
960 AllocationInfo mc_forwarding_info_;
961
962 // Bytes of each page that cannot be allocated. Possibly non-zero
963 // for pages in spaces with only fixed-size objects. Always zero
964 // for pages in spaces with variable sized objects (those pages are
965 // padded with free-list nodes).
966 int page_extra_;
967
968 // Sets allocation pointer to a page bottom.
969 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
970
971 // Returns the top page specified by an allocation info structure.
972 static Page* TopPageOf(AllocationInfo alloc_info) {
973 return Page::FromAllocationTop(alloc_info.limit);
974 }
975
  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
979 bool Expand(Page* last_page);
980
981 // Generic fast case allocation function that tries linear allocation in
982 // the top page of 'alloc_info'. Returns NULL on failure.
983 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
984 int size_in_bytes);
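  // The fast path above is classic bump-pointer allocation; roughly (an
  // illustrative sketch of the inline implementation, not the code itself):
  //
  //   Address new_top = alloc_info->top + size_in_bytes;
  //   if (new_top > alloc_info->limit) return NULL;  // caller takes slow path
  //   HeapObject* obj = HeapObject::FromAddress(alloc_info->top);
  //   alloc_info->top = new_top;
  //   return obj;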
985
986 // During normal allocation or deserialization, roll to the next page in
987 // the space (there is assumed to be one) and allocate there. This
988 // function is space-dependent.
989 virtual HeapObject* AllocateInNextPage(Page* current_page,
990 int size_in_bytes) = 0;
991
992 // Slow path of AllocateRaw. This function is space-dependent.
993 virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
994
995 // Slow path of MCAllocateRaw.
996 HeapObject* SlowMCAllocateRaw(int size_in_bytes);
997
998#ifdef DEBUG
999 void DoPrintRSet(const char* space_name);
1000#endif
1001 private:
1002 // Returns the page of the allocation pointer.
1003 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1004
1005 // Returns a pointer to the page of the relocation pointer.
1006 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1007
1008#ifdef DEBUG
1009 // Returns the number of total pages in this space.
1010 int CountTotalPages();
1011#endif
1012
1013 friend class PageIterator;
1014};
1015
1016
1017#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1018class NumberAndSizeInfo BASE_EMBEDDED {
1019 public:
1020 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1021
1022 int number() const { return number_; }
1023 void increment_number(int num) { number_ += num; }
1024
1025 int bytes() const { return bytes_; }
1026 void increment_bytes(int size) { bytes_ += size; }
1027
1028 void clear() {
1029 number_ = 0;
1030 bytes_ = 0;
1031 }
1032
1033 private:
1034 int number_;
1035 int bytes_;
1036};
1037
1038
1039// HistogramInfo class for recording a single "bar" of a histogram. This
1040// class is used for collecting statistics to print to stdout (when compiled
1041// with DEBUG) or to the log file (when compiled with
1042// ENABLE_LOGGING_AND_PROFILING).
1043class HistogramInfo: public NumberAndSizeInfo {
1044 public:
1045 HistogramInfo() : NumberAndSizeInfo() {}
1046
1047 const char* name() { return name_; }
1048 void set_name(const char* name) { name_ = name; }
1049
1050 private:
1051 const char* name_;
1052};
1053#endif
1054
1055
1056// -----------------------------------------------------------------------------
1057// SemiSpace in young generation
1058//
1059// A semispace is a contiguous chunk of memory. The mark-compact collector
1060// uses the memory in the from space as a marking stack when tracing live
1061// objects.
1062
1063class SemiSpace : public Space {
1064 public:
1065 // Constructor.
1066 SemiSpace() :Space(NEW_SPACE, NOT_EXECUTABLE) {
1067 start_ = NULL;
1068 age_mark_ = NULL;
1069 }
1070
1071 // Sets up the semispace using the given chunk.
1072 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1073
1074 // Tear down the space. Heap memory was not allocated by the space, so it
1075 // is not deallocated here.
1076 void TearDown();
1077
1078 // True if the space has been set up but not torn down.
1079 bool HasBeenSetup() { return start_ != NULL; }
1080
1081 // Grow the size of the semispace by committing extra virtual memory.
1082 // Assumes that the caller has checked that the semispace has not reached
1083 // its maximum capacity (and thus there is space available in the reserved
1084 // address range to grow).
1085 bool Grow();
1086
1087 // Grow the semispace to the new capacity. The new capacity
1088 // requested must be larger than the current capacity.
1089 bool GrowTo(int new_capacity);
1090
1091 // Shrinks the semispace to the new capacity. The new capacity
1092 // requested must be more than the amount of used memory in the
1093 // semispace and less than the current capacity.
1094 bool ShrinkTo(int new_capacity);
1095
1096 // Returns the start address of the space.
1097 Address low() { return start_; }
1098 // Returns one past the end address of the space.
1099 Address high() { return low() + capacity_; }
1100
1101 // Age mark accessors.
1102 Address age_mark() { return age_mark_; }
1103 void set_age_mark(Address mark) { age_mark_ = mark; }
1104
1105 // True if the address is in the address range of this semispace (not
1106 // necessarily below the allocation pointer).
1107 bool Contains(Address a) {
1108 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1109 == reinterpret_cast<uintptr_t>(start_);
1110 }
1111
1112 // True if the object is a heap object in the address range of this
1113 // semispace (not necessarily below the allocation pointer).
1114 bool Contains(Object* o) {
1115 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1116 }
1117
1118 // The offset of an address from the beginning of the space.
1119 int SpaceOffsetForAddress(Address addr) { return addr - low(); }
1120
1121 // If we don't have this here then SemiSpace will be abstract. However
1122 // it should never be called.
1123 virtual int Size() {
1124 UNREACHABLE();
1125 return 0;
1126 }
1127
1128 bool is_committed() { return committed_; }
1129 bool Commit();
1130 bool Uncommit();
1131
1132#ifdef DEBUG
1133 virtual void Print();
1134 virtual void Verify();
1135#endif
1136
1137 // Returns the current capacity of the semi space.
1138 int Capacity() { return capacity_; }
1139
1140 // Returns the maximum capacity of the semi space.
1141 int MaximumCapacity() { return maximum_capacity_; }
1142
1143 // Returns the initial capacity of the semi space.
1144 int InitialCapacity() { return initial_capacity_; }
1145
1146 private:
1147 // The current and maximum capacity of the space.
1148 int capacity_;
1149 int maximum_capacity_;
1150 int initial_capacity_;
1151
1152 // The start address of the space.
1153 Address start_;
1154 // Used to govern object promotion during mark-compact collection.
1155 Address age_mark_;
1156
1157 // Masks and comparison values to test for containment in this semispace.
1158 uintptr_t address_mask_;
1159 uintptr_t object_mask_;
1160 uintptr_t object_expected_;
1161
1162 bool committed_;
1163
1164 public:
1165 TRACK_MEMORY("SemiSpace")
1166};
1167
1168
1169// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1170// semispace of the heap's new space. It iterates over the objects in the
1171// semispace from a given start address (defaulting to the bottom of the
1172// semispace) to the top of the semispace. New objects allocated after the
1173// iterator is created are not iterated.
1174class SemiSpaceIterator : public ObjectIterator {
1175 public:
1176 // Create an iterator over the objects in the given space. If no start
1177 // address is given, the iterator starts from the bottom of the space. If
1178 // no size function is given, the iterator calls Object::Size().
1179 explicit SemiSpaceIterator(NewSpace* space);
1180 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1181 SemiSpaceIterator(NewSpace* space, Address start);
1182
  bool has_next() { return current_ < limit_; }
1184
1185 HeapObject* next() {
1186 ASSERT(has_next());
1187
1188 HeapObject* object = HeapObject::FromAddress(current_);
1189 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1190
1191 current_ += size;
1192 return object;
1193 }
1194
1195 // Implementation of the ObjectIterator functions.
1196 virtual bool has_next_object() { return has_next(); }
1197 virtual HeapObject* next_object() { return next(); }
1198
1199 private:
1200 void Initialize(NewSpace* space, Address start, Address end,
1201 HeapObjectCallback size_func);
1202
1203 // The semispace.
1204 SemiSpace* space_;
1205 // The current iteration point.
1206 Address current_;
1207 // The end of iteration.
1208 Address limit_;
1209 // The callback function.
1210 HeapObjectCallback size_func_;
1211};
1212
1213
1214// -----------------------------------------------------------------------------
1215// The young generation space.
1216//
1217// The new space consists of a contiguous pair of semispaces. It simply
1218// forwards most functions to the appropriate semispace.
1219
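// An illustrative outline of how the scavenger uses this pair (a sketch, not
// the collector itself): Flip() swaps the roles of the two semispaces, live
// objects are then evacuated from the old to-space into the new to-space or
// promoted to old space, and set_age_mark(top()) records the boundary used to
// decide promotion on the next scavenge.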
1220class NewSpace : public Space {
1221 public:
1222 // Constructor.
1223 NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}
1224
1225 // Sets up the new space using the given chunk.
1226 bool Setup(Address start, int size);
1227
1228 // Tears down the space. Heap memory was not allocated by the space, so it
1229 // is not deallocated here.
1230 void TearDown();
1231
1232 // True if the space has been set up but not torn down.
1233 bool HasBeenSetup() {
1234 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1235 }
1236
1237 // Flip the pair of spaces.
1238 void Flip();
1239
1240 // Grow the capacity of the semispaces. Assumes that they are not at
1241 // their maximum capacity.
1242 void Grow();
1243
1244 // Shrink the capacity of the semispaces.
1245 void Shrink();
1246
1247 // True if the address or object lies in the address range of either
1248 // semispace (not necessarily below the allocation pointer).
1249 bool Contains(Address a) {
1250 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1251 == reinterpret_cast<uintptr_t>(start_);
1252 }
1253 bool Contains(Object* o) {
1254 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1255 }
1256
1257 // Return the allocated bytes in the active semispace.
1258 virtual int Size() { return top() - bottom(); }

  // Return the current capacity of a semispace.
1261 int Capacity() {
1262 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1263 return to_space_.Capacity();
1264 }

  // Return the total amount of memory committed for new space.
1267 int CommittedMemory() {
1268 if (from_space_.is_committed()) return 2 * Capacity();
1269 return Capacity();
1270 }
1271
  // Return the available bytes without growing in the active semispace.
1273 int Available() { return Capacity() - Size(); }
1274
1275 // Return the maximum capacity of a semispace.
1276 int MaximumCapacity() {
1277 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1278 return to_space_.MaximumCapacity();
1279 }
1280
1281 // Returns the initial capacity of a semispace.
1282 int InitialCapacity() {
1283 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1284 return to_space_.InitialCapacity();
1285 }
1286
1287 // Return the address of the allocation pointer in the active semispace.
1288 Address top() { return allocation_info_.top; }
1289 // Return the address of the first object in the active semispace.
1290 Address bottom() { return to_space_.low(); }
1291
1292 // Get the age mark of the inactive semispace.
1293 Address age_mark() { return from_space_.age_mark(); }
1294 // Set the age mark in the active semispace.
1295 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1296
1297 // The start address of the space and a bit mask. Anding an address in the
1298 // new space with the mask will result in the start address.
1299 Address start() { return start_; }
1300 uintptr_t mask() { return address_mask_; }
1301
1302 // The allocation top and limit addresses.
1303 Address* allocation_top_address() { return &allocation_info_.top; }
1304 Address* allocation_limit_address() { return &allocation_info_.limit; }
1305
1306 Object* AllocateRaw(int size_in_bytes) {
1307 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1308 }
1309
1310 // Allocate the requested number of bytes for relocation during mark-compact
1311 // collection.
1312 Object* MCAllocateRaw(int size_in_bytes) {
1313 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1314 }
1315
1316 // Reset the allocation pointer to the beginning of the active semispace.
1317 void ResetAllocationInfo();
  // Reset the relocation pointer to the bottom of the inactive semispace in
  // preparation for mark-compact collection.
1320 void MCResetRelocationInfo();
1321 // Update the allocation pointer in the active semispace after a
1322 // mark-compact collection.
1323 void MCCommitRelocationInfo();
1324
1325 // Get the extent of the inactive semispace (for use as a marking stack).
1326 Address FromSpaceLow() { return from_space_.low(); }
1327 Address FromSpaceHigh() { return from_space_.high(); }
1328
1329 // Get the extent of the active semispace (to sweep newly copied objects
1330 // during a scavenge collection).
1331 Address ToSpaceLow() { return to_space_.low(); }
1332 Address ToSpaceHigh() { return to_space_.high(); }
1333
1334 // Offsets from the beginning of the semispaces.
1335 int ToSpaceOffsetForAddress(Address a) {
1336 return to_space_.SpaceOffsetForAddress(a);
1337 }
1338 int FromSpaceOffsetForAddress(Address a) {
1339 return from_space_.SpaceOffsetForAddress(a);
1340 }
1341
1342 // True if the object is a heap object in the address range of the
1343 // respective semispace (not necessarily below the allocation pointer of the
1344 // semispace).
1345 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1346 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1347
1348 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1349 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1350
1351#ifdef ENABLE_HEAP_PROTECTION
1352 // Protect/unprotect the space by marking it read-only/writable.
1353 virtual void Protect();
1354 virtual void Unprotect();
1355#endif
1356
1357#ifdef DEBUG
1358 // Verify the active semispace.
1359 virtual void Verify();
1360 // Print the active semispace.
1361 virtual void Print() { to_space_.Print(); }
1362#endif
1363
1364#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1365 // Iterates the active semispace to collect statistics.
1366 void CollectStatistics();
1367 // Reports previously collected statistics of the active semispace.
1368 void ReportStatistics();
1369 // Clears previously collected statistics.
1370 void ClearHistograms();
1371
1372 // Record the allocation or promotion of a heap object. Note that we don't
1373 // record every single allocation, but only those that happen in the
1374 // to space during a scavenge GC.
1375 void RecordAllocation(HeapObject* obj);
1376 void RecordPromotion(HeapObject* obj);
1377#endif
1378
  // Return whether the operation succeeded.
1380 bool CommitFromSpaceIfNeeded() {
1381 if (from_space_.is_committed()) return true;
1382 return from_space_.Commit();
1383 }
1384
1385 bool UncommitFromSpace() {
1386 if (!from_space_.is_committed()) return true;
1387 return from_space_.Uncommit();
1388 }
1389
1390 private:
1391 // The semispaces.
1392 SemiSpace to_space_;
1393 SemiSpace from_space_;
1394
1395 // Start address and bit mask for containment testing.
1396 Address start_;
1397 uintptr_t address_mask_;
1398 uintptr_t object_mask_;
1399 uintptr_t object_expected_;
1400
1401 // Allocation pointer and limit for normal allocation and allocation during
1402 // mark-compact collection.
1403 AllocationInfo allocation_info_;
1404 AllocationInfo mc_forwarding_info_;
1405
1406#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1407 HistogramInfo* allocated_histogram_;
1408 HistogramInfo* promoted_histogram_;
1409#endif
1410
1411 // Implementation of AllocateRaw and MCAllocateRaw.
1412 inline Object* AllocateRawInternal(int size_in_bytes,
1413 AllocationInfo* alloc_info);
1414
1415 friend class SemiSpaceIterator;
1416
1417 public:
1418 TRACK_MEMORY("NewSpace")
1419};
1420
1421
1422// -----------------------------------------------------------------------------
1423// Free lists for old object spaces
1424//
1425// Free-list nodes are free blocks in the heap. They look like heap objects
1426// (free-list node pointers have the heap object tag, and they have a map like
1427// a heap object). They have a size and a next pointer. The next pointer is
1428// the raw address of the next free list node (or NULL).
1429class FreeListNode: public HeapObject {
1430 public:
1431 // Obtain a free-list node from a raw address. This is not a cast because
1432 // it does not check nor require that the first word at the address is a map
1433 // pointer.
1434 static FreeListNode* FromAddress(Address address) {
1435 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1436 }
1437
  static inline bool IsFreeListNode(HeapObject* object);
1439
  // Set the size in bytes, which can be read with HeapObject::Size(). This
1441 // function also writes a map to the first word of the block so that it
1442 // looks like a heap object to the garbage collector and heap iteration
1443 // functions.
1444 void set_size(int size_in_bytes);
1445
1446 // Accessors for the next field.
1447 inline Address next();
1448 inline void set_next(Address next);
1449
1450 private:
1451 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1452
1453 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1454};
1455
1456
1457// The free list for the old space.
1458class OldSpaceFreeList BASE_EMBEDDED {
1459 public:
1460 explicit OldSpaceFreeList(AllocationSpace owner);
1461
1462 // Clear the free list.
1463 void Reset();
1464
1465 // Return the number of bytes available on the free list.
1466 int available() { return available_; }
1467
1468 // Place a node on the free list. The block of size 'size_in_bytes'
1469 // starting at 'start' is placed on the free list. The return value is the
1470 // number of bytes that have been lost due to internal fragmentation by
1471 // freeing the block. Bookkeeping information will be written to the block,
1472  // i.e., its contents will be destroyed. The start address should be word
1473 // aligned, and the size should be a non-zero multiple of the word size.
1474 int Free(Address start, int size_in_bytes);
1475
1476 // Allocate a block of size 'size_in_bytes' from the free list. The block
1477  // is uninitialized. A failure is returned if no block is available. The
1478 // number of bytes lost to fragmentation is returned in the output parameter
1479 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
1480 Object* Allocate(int size_in_bytes, int* wasted_bytes);
1481
1482 private:
1483 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1484 // will always result in waste.)
1485 static const int kMinBlockSize = 2 * kPointerSize;
1486 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1487
1488 // The identity of the owning space, for building allocation Failure
1489 // objects.
1490 AllocationSpace owner_;
1491
1492 // Total available bytes in all blocks on this free list.
1493 int available_;
1494
1495 // Blocks are put on exact free lists in an array, indexed by size in words.
1496  // The available sizes are kept in a list ordered by increasing size. Entries
1497 // corresponding to sizes < kMinBlockSize always have an empty free list
1498 // (but index kHead is used for the head of the size list).
1499 struct SizeNode {
1500 // Address of the head FreeListNode of the implied block size or NULL.
1501 Address head_node_;
1502 // Size (words) of the next larger available size if head_node_ != NULL.
1503 int next_size_;
1504 };
1505 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1506 SizeNode free_[kFreeListsLength];
1507
1508  // Sentinel elements for the size list. Real elements are in (kHead, kEnd).
1509 static const int kHead = kMinBlockSize / kPointerSize - 1;
1510 static const int kEnd = kMaxInt;
1511
1512 // We keep a "finger" in the size list to speed up a common pattern:
1513 // repeated requests for the same or increasing sizes.
1514 int finger_;
1515
1516 // Starting from *prev, find and return the smallest size >= index (words),
1517 // or kEnd. Update *prev to be the largest size < index, or kHead.
1518 int FindSize(int index, int* prev) {
1519 int cur = free_[*prev].next_size_;
1520 while (cur < index) {
1521 *prev = cur;
1522 cur = free_[cur].next_size_;
1523 }
1524 return cur;
1525 }
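
  // Worked example (sizes are illustrative): suppose the size list holds the
  // word sizes 2, 5 and 9.  FindSize(4, &prev) with *prev == kHead visits 2
  // (2 < 4, so *prev becomes 2) and then 5 (5 >= 4), returning 5 with
  // *prev == 2.  RemoveSize(5) would then splice 5 out of the list by linking
  // free_[2].next_size_ directly to 9.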
1526
1527 // Remove an existing element from the size list.
1528 void RemoveSize(int index) {
1529 int prev = kHead;
1530 int cur = FindSize(index, &prev);
1531 ASSERT(cur == index);
1532 free_[prev].next_size_ = free_[cur].next_size_;
1533 finger_ = prev;
1534 }
1535
1536 // Insert a new element into the size list.
1537 void InsertSize(int index) {
1538 int prev = kHead;
1539 int cur = FindSize(index, &prev);
1540 ASSERT(cur != index);
1541 free_[prev].next_size_ = index;
1542 free_[index].next_size_ = cur;
1543 }
1544
1545 // The size list is not updated during a sequence of calls to Free, but is
1546 // rebuilt before the next allocation.
1547 void RebuildSizeList();
1548 bool needs_rebuild_;
1549
1550#ifdef DEBUG
1551 // Does this free list contain a free block located at the address of 'node'?
1552 bool Contains(FreeListNode* node);
1553#endif
1554
1555 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1556};
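
// Usage sketch (editor's illustration; 'free_list', 'start' and the sizes are
// hypothetical): the owning space hands blocks back while sweeping and asks
// the list for memory when linear allocation fails.
//
//   int wasted = free_list.Free(start, 24);  // word-aligned 24-byte block
//   int wasted_bytes;
//   Object* result = free_list.Allocate(24, &wasted_bytes);
//   if (result->IsFailure()) {
//     // No suitable block; the space would fall back to expanding itself.
//   }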
1557
1558
1559// The free list for the map space.
1560class FixedSizeFreeList BASE_EMBEDDED {
1561 public:
1562 FixedSizeFreeList(AllocationSpace owner, int object_size);
1563
1564 // Clear the free list.
1565 void Reset();
1566
1567 // Return the number of bytes available on the free list.
1568 int available() { return available_; }
1569
1570 // Place a node on the free list. The block starting at 'start' (assumed to
1571 // have size object_size_) is placed on the free list. Bookkeeping
1572  // information will be written to the block, i.e., its contents will be
1573 // destroyed. The start address should be word aligned.
1574 void Free(Address start);
1575
1576  // Allocate a fixed-size block from the free list. The block is uninitialized.
1577 // A failure is returned if no block is available.
1578 Object* Allocate();
1579
1580 private:
1581 // Available bytes on the free list.
1582 int available_;
1583
1584 // The head of the free list.
1585 Address head_;
1586
1587 // The identity of the owning space, for building allocation Failure
1588 // objects.
1589 AllocationSpace owner_;
1590
1591 // The size of the objects in this space.
1592 int object_size_;
1593
1594 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1595};
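
// Usage sketch (editor's illustration; 'cell_list' and 'addr' are
// hypothetical): every block on this list has the same size, so neither Free
// nor Allocate takes a size argument.
//
//   cell_list.Free(addr);               // addr must be word aligned
//   Object* result = cell_list.Allocate();
//   if (!result->IsFailure()) {
//     // result is an uninitialized block of object_size_ bytes.
//   }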
1596
1597
1598// -----------------------------------------------------------------------------
1599// Old object space (excluding map objects)
1600
1601class OldSpace : public PagedSpace {
1602 public:
1603 // Creates an old space object with a given maximum capacity.
1604  // The constructor does not allocate pages from the OS.
1605 explicit OldSpace(int max_capacity,
1606 AllocationSpace id,
1607 Executability executable)
1608 : PagedSpace(max_capacity, id, executable), free_list_(id) {
1609 page_extra_ = 0;
1610 }
1611
1612  // The bytes available on the free list (i.e., not above the linear allocation
1613 // pointer).
1614 int AvailableFree() { return free_list_.available(); }
1615
1616 // The top of allocation in a page in this space. Undefined if page is unused.
1617 virtual Address PageAllocationTop(Page* page) {
1618 return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
1619 }
1620
1621 // Give a block of memory to the space's free list. It might be added to
1622 // the free list or accounted as waste.
1623 void Free(Address start, int size_in_bytes) {
1624 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1625 accounting_stats_.DeallocateBytes(size_in_bytes);
1626 accounting_stats_.WasteBytes(wasted_bytes);
1627 }
1628
1629 // Prepare for full garbage collection. Resets the relocation pointer and
1630 // clears the free list.
1631 virtual void PrepareForMarkCompact(bool will_compact);
1632
1633 // Updates the allocation pointer to the relocation top after a mark-compact
1634 // collection.
1635 virtual void MCCommitRelocationInfo();
1636
1637#ifdef DEBUG
1638 // Reports statistics for the space
1639 void ReportStatistics();
1640 // Dump the remembered sets in the space to stdout.
1641 void PrintRSet();
1642#endif
1643
1644 protected:
1645 // Virtual function in the superclass. Slow path of AllocateRaw.
1646 HeapObject* SlowAllocateRaw(int size_in_bytes);
1647
1648 // Virtual function in the superclass. Allocate linearly at the start of
1649 // the page after current_page (there is assumed to be one).
1650 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1651
1652 private:
1653 // The space's free list.
1654 OldSpaceFreeList free_list_;
1655
1656 public:
1657 TRACK_MEMORY("OldSpace")
1658};
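
// Sketch (editor's illustration; the sweeper-style caller and names are
// hypothetical): dead regions found during sweeping are returned to the
// space, which splits the accounting between reusable free-list bytes and
// waste.
//
//   old_space->Free(dead_start, dead_size);     // may grow the free list
//   int reusable = old_space->AvailableFree();  // bytes usable for allocation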
1659
1660
1661// -----------------------------------------------------------------------------
1662// Old space for objects of a fixed size
1663
1664class FixedSpace : public PagedSpace {
1665 public:
1666 FixedSpace(int max_capacity,
1667 AllocationSpace id,
1668 int object_size_in_bytes,
1669 const char* name)
1670 : PagedSpace(max_capacity, id, NOT_EXECUTABLE),
1671 object_size_in_bytes_(object_size_in_bytes),
1672 name_(name),
1673 free_list_(id, object_size_in_bytes) {
1674 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1675 }
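
  // Worked example (the numbers are illustrative, not the real constants): if
  // the page object area were 7936 bytes and object_size_in_bytes were 40,
  // page_extra_ = 7936 % 40 = 16, i.e. the last 16 bytes of each page cannot
  // hold a whole object and are kept out of the allocation area (see
  // PageAllocationTop below).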
1676
1677 // The top of allocation in a page in this space. Undefined if page is unused.
1678 virtual Address PageAllocationTop(Page* page) {
1679 return page == TopPageOf(allocation_info_) ? top()
1680 : page->ObjectAreaEnd() - page_extra_;
1681 }
1682
1683 int object_size_in_bytes() { return object_size_in_bytes_; }
1684
1685 // Give a fixed sized block of memory to the space's free list.
1686 void Free(Address start) {
1687 free_list_.Free(start);
1688 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1689 }
1690
1691 // Prepares for a mark-compact GC.
1692 virtual void PrepareForMarkCompact(bool will_compact);
1693
1694 // Updates the allocation pointer to the relocation top after a mark-compact
1695 // collection.
1696 virtual void MCCommitRelocationInfo();
1697
1698#ifdef DEBUG
1699  // Reports statistics for the space
1700 void ReportStatistics();
1701
1702 // Dump the remembered sets in the space to stdout.
1703 void PrintRSet();
1704#endif
1705
1706 protected:
1707 // Virtual function in the superclass. Slow path of AllocateRaw.
1708 HeapObject* SlowAllocateRaw(int size_in_bytes);
1709
1710 // Virtual function in the superclass. Allocate linearly at the start of
1711 // the page after current_page (there is assumed to be one).
1712 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1713
1714 private:
1715 // The size of objects in this space.
1716 int object_size_in_bytes_;
1717
1718 // The name of this space.
1719 const char* name_;
1720
1721 // The space's free list.
1722 FixedSizeFreeList free_list_;
1723};
1724
1725
1726// -----------------------------------------------------------------------------
1727// Old space for all map objects
1728
1729class MapSpace : public FixedSpace {
1730 public:
1731 // Creates a map space object with a maximum capacity.
1732 MapSpace(int max_capacity, AllocationSpace id)
1733 : FixedSpace(max_capacity, id, Map::kSize, "map") {}
1734
1735 // Prepares for a mark-compact GC.
1736 virtual void PrepareForMarkCompact(bool will_compact);
1737
1738 // Given an index, returns the page address.
1739 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1740
1741 // Constants.
1742 static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
1743
1744 protected:
1745#ifdef DEBUG
1746 virtual void VerifyObject(HeapObject* obj);
1747#endif
1748
1749 private:
1750  // An array of page start addresses in a map space.
1751 Address page_addresses_[kMaxMapPageIndex + 1];
1752
1753 public:
1754 TRACK_MEMORY("MapSpace")
1755};
1756
1757
1758// -----------------------------------------------------------------------------
1759// Old space for all global object property cell objects
1760
1761class CellSpace : public FixedSpace {
1762 public:
1763 // Creates a property cell space object with a maximum capacity.
1764 CellSpace(int max_capacity, AllocationSpace id)
1765 : FixedSpace(max_capacity, id, JSGlobalPropertyCell::kSize, "cell") {}
1766
1767 protected:
1768#ifdef DEBUG
1769 virtual void VerifyObject(HeapObject* obj);
1770#endif
1771
1772 public:
1773 TRACK_MEMORY("CellSpace")
1774};
1775
1776
1777// -----------------------------------------------------------------------------
1778// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
1779// the large object space. A large object is allocated from the OS heap with
1780// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
1781// A large object always starts at Page::kObjectStartOffset within a page.
1782// Large objects do not move during garbage collections.
1783
1784// A LargeObjectChunk holds exactly one large object page with exactly one
1785// large object.
1786class LargeObjectChunk {
1787 public:
1788 // Allocates a new LargeObjectChunk that contains a large object page
1789 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
1790 // object and possibly extra remembered set words) bytes after the object
1791 // area start of that page. The allocated chunk size is set in the output
1792 // parameter chunk_size.
1793 static LargeObjectChunk* New(int size_in_bytes,
1794 size_t* chunk_size,
1795 Executability executable);
1796
1797 // Interpret a raw address as a large object chunk.
1798 static LargeObjectChunk* FromAddress(Address address) {
1799 return reinterpret_cast<LargeObjectChunk*>(address);
1800 }
1801
1802 // Returns the address of this chunk.
1803 Address address() { return reinterpret_cast<Address>(this); }
1804
1805 // Accessors for the fields of the chunk.
1806 LargeObjectChunk* next() { return next_; }
1807 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
1808
1809 size_t size() { return size_; }
1810 void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }
1811
1812 // Returns the object in this chunk.
1813 inline HeapObject* GetObject();
1814
1815 // Given a requested size (including any extra remembered set words),
1816 // returns the physical size of a chunk to be allocated.
1817 static int ChunkSizeFor(int size_in_bytes);
1818
1819 // Given a chunk size, returns the object size it can accommodate (not
1820 // including any extra remembered set words). Used by
1821 // LargeObjectSpace::Available. Note that this can overestimate the size
1822 // of object that will fit in a chunk---if the object requires extra
1823  // of the object that will fit in a chunk: if the object requires extra
1824  // remembered set words (e.g., for large fixed arrays), the actual object
1825 static int ObjectSizeFor(int chunk_size) {
1826 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
1827 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
1828 }
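
  // Worked example (assuming, purely for illustration, Page::kPageSize == 8192
  // and Page::kObjectStartOffset == 256): ObjectSizeFor(64 * 1024) returns
  // 65536 - 8192 - 256 = 57088, so a 64K chunk can hold an object of at most
  // 57088 bytes, before accounting for any extra remembered set words.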
1829
1830 private:
1831 // A pointer to the next large object chunk in the space or NULL.
1832 LargeObjectChunk* next_;
1833
1834 // The size of this chunk.
1835 size_t size_;
1836
1837 public:
1838 TRACK_MEMORY("LargeObjectChunk")
1839};
1840
1841
1842class LargeObjectSpace : public Space {
1843 public:
1844 explicit LargeObjectSpace(AllocationSpace id);
1845 virtual ~LargeObjectSpace() {}
1846
1847 // Initializes internal data structures.
1848 bool Setup();
1849
1850 // Releases internal resources, frees objects in this space.
1851 void TearDown();
1852
1853 // Allocates a (non-FixedArray, non-Code) large object.
1854 Object* AllocateRaw(int size_in_bytes);
1855 // Allocates a large Code object.
1856 Object* AllocateRawCode(int size_in_bytes);
1857 // Allocates a large FixedArray.
1858 Object* AllocateRawFixedArray(int size_in_bytes);
1859
1860 // Available bytes for objects in this space, not including any extra
1861 // remembered set words.
1862 int Available() {
1863 return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
1864 }
1865
1866 virtual int Size() {
1867 return size_;
1868 }
1869
1870 int PageCount() {
1871 return page_count_;
1872 }
1873
1874  // Finds an object for a given address; returns Failure::Exception()
1875  // if it is not found. The function iterates through all objects in this
1876  // space, so it may be slow.
1877 Object* FindObject(Address a);
1878
1879 // Clears remembered sets.
1880 void ClearRSet();
1881
1882 // Iterates objects whose remembered set bits are set.
1883 void IterateRSet(ObjectSlotCallback func);
1884
1885 // Frees unmarked objects.
1886 void FreeUnmarkedObjects();
1887
1888 // Checks whether a heap object is in this space; O(1).
1889 bool Contains(HeapObject* obj);
1890
1891 // Checks whether the space is empty.
1892 bool IsEmpty() { return first_chunk_ == NULL; }
1893
1894#ifdef ENABLE_HEAP_PROTECTION
1895 // Protect/unprotect the space by marking it read-only/writable.
1896 void Protect();
1897 void Unprotect();
1898#endif
1899
1900#ifdef DEBUG
1901 virtual void Verify();
1902 virtual void Print();
1903 void ReportStatistics();
1904 void CollectCodeStatistics();
1905 // Dump the remembered sets in the space to stdout.
1906 void PrintRSet();
1907#endif
1908 // Checks whether an address is in the object area in this space. It
1909  // iterates over all objects in the space, so it may be slow.
1910 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
1911
1912 private:
1913 // The head of the linked list of large object chunks.
1914 LargeObjectChunk* first_chunk_;
1915 int size_; // allocated bytes
1916 int page_count_; // number of chunks
1917
1918
1919 // Shared implementation of AllocateRaw, AllocateRawCode and
1920 // AllocateRawFixedArray.
1921 Object* AllocateRawInternal(int requested_size,
1922 int object_size,
1923 Executability executable);
1924
1925  // Returns the number of extra remembered set bytes (rounded up to the
1926  // nearest full word) required to cover extra_object_bytes bytes of pointers.
1927 static inline int ExtraRSetBytesFor(int extra_object_bytes);
1928
1929 friend class LargeObjectIterator;
1930
1931 public:
1932 TRACK_MEMORY("LargeObjectSpace")
1933};
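
// Usage sketch (editor's illustration; 'lo_space' and 'size_in_bytes' are
// hypothetical): callers pick the entry point that matches the object kind
// (plain object, Code, or FixedArray).
//
//   Object* result = lo_space->AllocateRawFixedArray(size_in_bytes);
//   if (result->IsFailure()) return result;      // propagate allocation failure
//   HeapObject* object = HeapObject::cast(result);
//   ASSERT(lo_space->Contains(object));          // O(1) containment check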
1934
1935
1936class LargeObjectIterator: public ObjectIterator {
1937 public:
1938 explicit LargeObjectIterator(LargeObjectSpace* space);
1939 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
1940
1941 bool has_next() { return current_ != NULL; }
1942 HeapObject* next();
1943
1944 // implementation of ObjectIterator.
1945 virtual bool has_next_object() { return has_next(); }
1946 virtual HeapObject* next_object() { return next(); }
1947
1948 private:
1949 LargeObjectChunk* current_;
1950 HeapObjectCallback size_func_;
1951};
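
// Usage sketch (editor's illustration; 'space' is a hypothetical
// LargeObjectSpace*): visiting every large object, for example to gather
// statistics.
//
//   LargeObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* object = it.next();
//     // ... inspect 'object' ...
//   }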
1952
1953
1954} } // namespace v8::internal
1955
1956#endif // V8_SPACES_H_