// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "list-inl.h"
#include "log.h"

namespace v8 { namespace internal {

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header, a remembered
// set area, and an object area. The page size is deliberately chosen to be
// 8K bytes. The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word
// may contain the allocation top address of this page. The next 248 bytes
// are the remembered set. Heap objects are aligned to the pointer size
// (4 bytes). A remembered set bit corresponds to a pointer in the object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged and uses the same remembered
// set implementation. Pages in large object space may be larger than 8K.
//
// NOTE: The mark-compact collector rebuilds the remembered set after a
// collection. It first reuses a few words of the remembered set for
// bookkeeping relocation information.


// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address)                                          \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address)                                        \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size)                                              \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset)                                            \
  ASSERT((Page::kObjectStartOffset <= offset)                                 \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index)                                          \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size. A page is divided into
// three areas: the first two words are used for bookkeeping, the next 248
// bytes are used as the remembered set, and the rest of the page is the
// object area.
//
// Pointers are aligned to the pointer size (4 bytes), so only 1 bit is
// needed per pointer in the remembered set. Given an address, its remembered
// set bit position (offset from the start of the page) is calculated by
// dividing its page offset by 32. Therefore, the object area in a page
// starts at the 256th byte (8K/32). Bytes 0 to 255 do not need remembered
// set bits, so the first two words (64 bits) in a page can be used for
// other purposes.
//
// The mark-compact collector transforms a map pointer into a page index and
// a page offset. The map space can have up to 1024 pages, and 8M bytes
// (1024 * 8K) in total. Because a map pointer is aligned to the pointer
// size (4 bytes), 11 bits are enough to encode the page offset. 21 bits
// (10 for the page index + 11 for the offset in the page) are required to
// encode a map pointer.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[.
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we need to subtract
  // kPointerSize from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Returns the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Returns the start address of the remembered set area.
  Address RSetStart() { return address() + kRSetStartOffset; }

  // Returns the end address of the remembered set area (exclusive).
  Address RSetEnd() { return address() + kRSetEndOffset; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page is a large object page.
  bool IsLargeObjectPage() { return (is_normal_page & 0x1) == 0; }

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = a - address();
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset within this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Remembered set support

  // Clears the remembered set in this page.
  inline void ClearRSet();

  // Returns the address of the remembered set word corresponding to an
  // object address/offset pair, and the bit encoded as a single-bit
  // mask in the output parameter 'bitmask'.
  INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
                                               uint32_t* bitmask));

  // Sets the corresponding remembered set bit for a given address.
  INLINE(static void SetRSet(Address address, int offset));

  // Clears the corresponding remembered set bit for a given address.
  static inline void UnsetRSet(Address address, int offset);

  // Checks whether the remembered set bit for a given address is set.
  static inline bool IsRSetSet(Address address, int offset);

#ifdef DEBUG
  // Use a state to mark whether the remembered set space can be used for
  // other purposes.
  enum RSetState { IN_USE, NOT_IN_USE };
  static bool is_rset_in_use() { return rset_state_ == IN_USE; }
  static void set_rset_state(RSetState state) { rset_state_ = state; }
#endif

  // 8K bytes per page.
  static const int kPageSizeBits = 13;

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const int kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  // The end offset of the remembered set in a page
  // (heaps are aligned to pointer size).
  static const int kRSetEndOffset = kPageSize / kBitsPerPointer;

  // The start offset of the remembered set in a page.
  static const int kRSetStartOffset = kRSetEndOffset / kBitsPerPointer;

  // The start offset of the object area in a page.
  static const int kObjectStartOffset = kRSetEndOffset;

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize, 8K)
  // and the chunk number (0 to 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  int opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  int is_normal_page;

  // The following fields overlap with the remembered set; they can only
  // be used in the mark-compact collector when the remembered set is not
  // in use.

  // The allocation pointer after relocating objects to this page.
  Address mc_relocation_top;

  // The index of the page in its owner space.
  int mc_page_index;

  // The forwarding address of the first live object in this page.
  Address mc_first_forwarded;

#ifdef DEBUG
 private:
  static RSetState rset_state_;  // state of the remembered set
#endif
};
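
// A minimal sketch of the page/remembered-set geometry described above
// (illustrative only; the real bit computation is ComputeRSetBitPosition).
// With kPageSize == 8K, masking off the low 13 bits of any interior address
// recovers the page, and 32 object-area bytes map onto one rset byte:
//
//   Address slot = ...;                   // some pointer slot in a page
//   Page* p = Page::FromAddress(slot);    // slot & ~kPageAlignmentMask
//   int offset = p->Offset(slot);         // in [256 .. 8192[
//   int rset_byte = offset / 32;          // in [kRSetStartOffset .. 256[
//   int rset_bit = (offset / kPointerSize) & 7;  // bit within that byte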


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(AllocationSpace id, Executability executable)
      : id_(id), executable_(executable) {}

  virtual ~Space() {}

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  virtual int Size() = 0;

#ifdef DEBUG
  virtual void Verify() = 0;
  virtual void Print() = 0;
#endif

 private:
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space; each
// expands by allocating kPagesPerChunk pages at a time, except possibly for
// the last expansion before running out of reserved space. The first chunk
// may also contain fewer than kPagesPerChunk pages.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.

class MemoryAllocator : public AllStatic {
 public:
  // Initializes the allocator's internal bookkeeping structures.
  // max_capacity is the maximum capacity of the total space, in bytes.
  static bool Setup(int max_capacity);

  // Deletes valid chunks.
  static void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or NULL if unsuccessful, in which case the
  // initial chunk is left NULL.
  static void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                           int* num_pages);

  // Commits a contiguous block of memory from the initial chunk. Assumes
  // that the address is not NULL, the size is greater than zero, and that
  // the block is contained in the initial chunk. Returns true if it
  // succeeded and false otherwise.
  static bool CommitBlock(Address start, size_t size,
                          Executability executable);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages.
  static Page* AllocatePages(int requested_pages, int* allocated_pages,
                             PagedSpace* owner);

  // Frees pages from a given page and after. If 'p' is the first page
  // of a chunk, pages from 'p' are freed and this function returns an
  // invalid page pointer. Otherwise, the function searches for a page
  // after 'p' that is the first page of a chunk. Pages after the
  // found page are freed and the function returns 'p'.
  static Page* FreePages(Page* p);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but they keep track of allocated bytes as part of the heap.
  static void* AllocateRawMemory(const size_t requested,
                                 size_t* allocated,
                                 Executability executable);
  static void FreeRawMemory(void* buf, size_t length);

  // Returns the maximum number of available bytes in the heap.
  static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns the maximum number of available bytes that the old space can
  // have.
  static int MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  static inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  static inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  static inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  static inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  static Page* FindFirstPageInSameChunk(Page* p);
  static Page* FindLastPageInSameChunk(Page* p);

#ifdef ENABLE_HEAP_PROTECTION
  // Protects/unprotects a block of memory by marking it read-only/writable.
  static inline void Protect(Address start, size_t size);
  static inline void Unprotect(Address start, size_t size,
                               Executability executable);

  // Protects/unprotects a chunk given a page in the chunk.
  static inline void ProtectChunkFromPage(Page* page);
  static inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistics about the space.
  static void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << Page::kPageSizeBits;
  // If a chunk has at least 32 pages, the maximum heap size is about
  // 8 * 1024 * 32 * 8K = 2G bytes.
  static const int kPagesPerChunk = 64;
  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;

 private:
  // Maximum space size in bytes.
  static int capacity_;

  // Allocated space size in bytes.
  static int size_;

  // The initial chunk of virtual memory.
  static VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
    void init(Address a, size_t s, PagedSpace* o) {
      address_ = a;
      size_ = s;
      owner_ = o;
    }
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
  };

  // chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  static List<ChunkInfo> chunks_;
  static List<int> free_chunk_ids_;
  static int max_nof_chunks_;
  static int top_;

  // Pushes/pops a free chunk id onto/from the stack.
  static void Push(int free_chunk_id);
  static int Pop();
  static bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  static void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  static inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  static inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  static inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  static inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and whose page headers are destroyed.
  static Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                                      PagedSpace* owner);
};
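
// A hedged sketch of how the setup API above composes (illustrative only;
// the real sequence lives in the heap's setup code, and 'max_heap_bytes',
// 'young_size', 'initial_old_bytes', and 'old_space' are made-up names):
//
//   MemoryAllocator::Setup(max_heap_bytes);
//   void* base = MemoryAllocator::ReserveInitialChunk(requested_bytes);
//   if (base != NULL) {
//     int pages;
//     Page* first = MemoryAllocator::CommitPages(
//         reinterpret_cast<Address>(base) + young_size,  // leftover region
//         initial_old_bytes, old_space, &pages);
//     ASSERT(first->is_valid());  // CommitPages always succeeds
//   }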

// -----------------------------------------------------------------------------
// Interface for heap object iterators, to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own
// has_next() and next() methods, which are used to avoid virtual function
// calls when iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual bool has_next_object() = 0;
  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). If the space top changes during
// iteration (because of allocating new objects), the iterator does
// not iterate new objects. The caller must create a new iterator
// starting from the old top in order to visit these new objects.
// Heap::Scavenge() is such an example.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);

  inline bool has_next();
  inline HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  // Slow path of has_next; checks whether there are more objects in
  // the next page.
  bool HasNextInNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};
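
// Minimal usage sketch (illustrative; 'space' stands for any PagedSpace*):
//
//   HeapObjectIterator it(space);
//   while (it.has_next()) {
//     HeapObject* obj = it.next();
//     // ... visit obj; objects allocated after construction are skipped.
//   }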

// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a space.
//
// The PageIterator class provides three modes for iterating pages in a space:
// PAGES_IN_USE iterates pages that are in use by the allocator;
// PAGES_USED_BY_MC iterates pages that hold relocated objects during a
// mark-compact collection;
// ALL_PAGES iterates all pages in the space.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode { PAGES_IN_USE, PAGES_USED_BY_MC, ALL_PAGES };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  Page* cur_page_;   // next page to return
  Page* stop_page_;  // page where iteration stops
};
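
// Usage sketch (illustrative):
//
//   PageIterator pages(space, PageIterator::PAGES_IN_USE);
//   while (pages.has_next()) {
//     Page* page = pages.next();
//     // ... e.g., scan [page->ObjectAreaStart(), page->AllocationTop()[.
//   }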

// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes. The 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks
// due to internal fragmentation, top-of-page areas in map space). The
// 'available' bytes is the number of unallocated bytes that are not waste.
// The capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  int Capacity() { return capacity_; }
  int Available() { return available_; }
  int Size() { return size_; }
  int Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(int size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(int size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  int capacity_;
  int available_;
  int size_;
  int waste_;
};
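
// Worked example of the balancing described above (illustrative):
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity == available
//   stats.AllocateBytes(64);                   // available -> size
//   stats.WasteBytes(8);                       // available -> waste
//   // Invariant at every step:
//   //   stats.Capacity() == stats.Size() + stats.Waste() + stats.Available()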


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(int max_capacity, AllocationSpace id, Executability executable);

  virtual ~PagedSpace() {}

  // Sets up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space: frees all pages in this space except those
  // belonging to the initial chunk, and uncommits addresses in the initial
  // chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, returns that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  // Clears the remembered sets of the pages in this space.
  void ClearRSet();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact) = 0;

  virtual Address PageAllocationTop(Page* page) = 0;

  // Current capacity without growing (Size() + Available() + Waste()).
  int Capacity() { return accounting_stats_.Capacity(); }

  // Available bytes without growing.
  int Available() { return accounting_stats_.Available(); }

  // Allocated bytes in this space.
  virtual int Size() { return accounting_stats_.Size(); }

  // Wasted bytes due to fragmentation, not recoverable until the
  // next GC of this space.
  int Waste() { return accounting_stats_.Waste(); }

  // Returns the address of the first object in this space.
  Address bottom() { return first_page_->ObjectAreaStart(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top; }

  // Allocates the requested number of bytes in the space if possible,
  // returns a failure object if not.
  inline Object* AllocateRaw(int size_in_bytes);

  // Allocates the requested number of bytes for relocation during
  // mark-compact collection.
  inline Object* MCAllocateRaw(int size_in_bytes);


  // ---------------------------------------------------------------------------
  // Mark-compact collection support functions

  // Sets the relocation point to the beginning of the space.
  void MCResetRelocationInfo();

  // Writes relocation info to the top page.
  void MCWriteRelocationInfoToPage() {
    TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
  }

  // Computes the offset of a given address in this space from the beginning
  // of the space.
  int MCSpaceOffsetForAddress(Address addr);

  // Updates the allocation pointer to the relocation top after a
  // mark-compact collection.
  virtual void MCCommitRelocationInfo() = 0;

  // Releases half of the unused pages.
  void Shrink();

  // Ensures that the capacity is at least 'capacity'. Returns false on
  // failure.
  bool EnsureCapacity(int capacity);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics();
  static void ResetCodeStatistics();
#endif

 protected:
  // Maximum capacity of this space.
  int max_capacity_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The first page in this space.
  Page* first_page_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // Relocation information during mark-compact collections.
  AllocationInfo mc_forwarding_info_;

  // Sets the allocation pointer to a page bottom.
  static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);

  // Returns the top page specified by an allocation info structure.
  static Page* TopPageOf(AllocationInfo alloc_info) {
    return Page::FromAllocationTop(alloc_info.limit);
  }

  // Expands the space by allocating a fixed number of pages. Returns false
  // if it cannot allocate the requested number of pages from the OS. Newly
  // allocated pages are appended after last_page.
  bool Expand(Page* last_page);

  // Generic fast case allocation function that tries linear allocation in
  // the top page of 'alloc_info'. Returns NULL on failure.
  inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
                                      int size_in_bytes);

  // During normal allocation or deserialization, rolls to the next page in
  // the space (there is assumed to be one) and allocates there. This
  // function is space-dependent.
  virtual HeapObject* AllocateInNextPage(Page* current_page,
                                         int size_in_bytes) = 0;

  // Slow path of AllocateRaw. This function is space-dependent.
  virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;

  // Slow path of MCAllocateRaw.
  HeapObject* SlowMCAllocateRaw(int size_in_bytes);

#ifdef DEBUG
  void DoPrintRSet(const char* space_name);
#endif

 private:
  // Returns the page of the allocation pointer.
  Page* AllocationTopPage() { return TopPageOf(allocation_info_); }

  // Returns a pointer to the page of the relocation pointer.
  Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }

#ifdef DEBUG
  // Returns the number of total pages in this space.
  int CountTotalPages();
#endif

  friend class PageIterator;
};
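
// Allocation-result convention sketch (illustrative): AllocateRaw returns
// either a heap object or a failure object (cf. Failure::Exception() in
// FindObject above), so callers are expected to test before using it:
//
//   Object* result = space->AllocateRaw(size_in_bytes);
//   if (result->IsFailure()) {
//     // ... not enough space: trigger a GC or propagate the failure.
//   }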


#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to stdout (when compiled
// with DEBUG) or to the log file (when compiled with
// ENABLE_LOGGING_AND_PROFILING).
class HistogramInfo BASE_EMBEDDED {
 public:
  HistogramInfo() : number_(0), bytes_(0) {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

  int number() { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  // Clear the number of objects and size fields, but not the name.
  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  const char* name_;
  int number_;
  int bytes_;
};
#endif


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory. The mark-compact collector
// uses the memory in the from space as a marking stack when tracing live
// objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {
    start_ = NULL;
    age_mark_ = NULL;
  }

  // Sets up the semispace using the given chunk.
  bool Setup(Address start, int initial_capacity, int maximum_capacity);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() { return start_ != NULL; }

  // Doubles the size of the semispace by committing extra virtual memory.
  // Assumes that the caller has checked that the semispace has not reached
  // its maximum capacity (and thus there is space available in the reserved
  // address range to grow).
  bool Double();

  // Returns the start address of the space.
  Address low() { return start_; }
  // Returns one past the end address of the space.
  Address high() { return low() + capacity_; }

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark) { age_mark_ = mark; }

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // The offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) { return addr - low(); }

  // If we did not define this here, SemiSpace would be abstract. It should,
  // however, never be called.
  virtual int Size() {
    UNREACHABLE();
    return 0;
  }

#ifdef DEBUG
  virtual void Print();
  virtual void Verify();
#endif

 private:
  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

 public:
  TRACK_MEMORY("SemiSpace")
};
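
// Containment arithmetic sketch (illustrative numbers, assuming start_ is
// aligned to a power-of-two reserved size and address_mask_ is the
// complement of that size minus one):
//
//   // start_ = 0x20000000, reserved size = 8M (0x00800000)
//   // address_mask_ = ~(0x00800000 - 1) = 0xFF800000
//   // a = 0x20123456  ->  a & address_mask_ == 0x20000000 == start_
//
// object_mask_ additionally folds in the heap object tag bits, so
// Contains(Object*) tests "tagged pointer into this range" in one compare.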


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space. If no start
  // address is given, the iterator starts from the bottom of the space. If
  // no size function is given, the iterator calls Object::Size().
  explicit SemiSpaceIterator(NewSpace* space);
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  SemiSpaceIterator(NewSpace* space, Address start);

  bool has_next() { return current_ < limit_; }

  HeapObject* next() {
    ASSERT(has_next());

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
    ASSERT_OBJECT_SIZE(size);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  void Initialize(NewSpace* space, Address start, Address end,
                  HeapObjectCallback size_func);

  // The semispace.
  SemiSpace* space_;
  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
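
// Iteration sketch (illustrative): walking the active semispace is a bump
// walk from bottom to top, advancing by each object's size:
//
//   SemiSpaceIterator it(new_space);  // 'new_space' is any NewSpace*
//   while (it.has_next()) {
//     HeapObject* obj = it.next();    // current_ advances by obj's size
//   }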


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  NewSpace() : Space(NEW_SPACE, NOT_EXECUTABLE) {}

  // Sets up the new space using the given chunk.
  bool Setup(Address start, int size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetup() {
    return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
  }

  // Flips the pair of spaces.
  void Flip();

  // Doubles the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity. Returns a flag indicating success or failure.
  bool Double();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
           == reinterpret_cast<uintptr_t>(start_);
  }
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }

  // Returns the allocated bytes in the active semispace.
  virtual int Size() { return top() - bottom(); }
  // Returns the current capacity of a semispace.
  int Capacity() { return capacity_; }
  // Returns the available bytes without growing in the active semispace.
  int Available() { return Capacity() - Size(); }

  // Returns the maximum capacity of a semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the address of the allocation pointer in the active semispace.
  Address top() { return allocation_info_.top; }
  // Returns the address of the first object in the active semispace.
  Address bottom() { return to_space_.low(); }

  // Gets the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Sets the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uint32_t mask() { return address_mask_; }

  // The allocation top and limit addresses.
  Address* allocation_top_address() { return &allocation_info_.top; }
  Address* allocation_limit_address() { return &allocation_info_.limit; }

  Object* AllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &allocation_info_);
  }

  // Allocates the requested number of bytes for relocation during
  // mark-compact collection.
  Object* MCAllocateRaw(int size_in_bytes) {
    return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
  }

  // Resets the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();
  // Resets the relocation pointer to the bottom of the inactive semispace
  // in preparation for mark-compact collection.
  void MCResetRelocationInfo();
  // Updates the allocation pointer in the active semispace after a
  // mark-compact collection.
  void MCCommitRelocationInfo();

  // Gets the extent of the inactive semispace (for use as a marking stack).
  Address FromSpaceLow() { return from_space_.low(); }
  Address FromSpaceHigh() { return from_space_.high(); }

  // Gets the extent of the active semispace (to sweep newly copied objects
  // during a scavenge collection).
  Address ToSpaceLow() { return to_space_.low(); }
  Address ToSpaceHigh() { return to_space_.high(); }

  // Offsets from the beginning of the semispaces.
  int ToSpaceOffsetForAddress(Address a) {
    return to_space_.SpaceOffsetForAddress(a);
  }
  int FromSpaceOffsetForAddress(Address a) {
    return from_space_.SpaceOffsetForAddress(a);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of
  // the semispace).
  bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
  bool FromSpaceContains(Address a) { return from_space_.Contains(a); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect();
  virtual void Unprotect();
#endif

#ifdef DEBUG
  // Verify the active semispace.
  virtual void Verify();
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);
#endif

 private:
  // The current and maximum capacities of a semispace.
  int capacity_;
  int maximum_capacity_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;

  // Start address and bit mask for containment testing.
  Address start_;
  uint32_t address_mask_;
  uint32_t object_mask_;
  uint32_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation
  // during mark-compact collection.
  AllocationInfo allocation_info_;
  AllocationInfo mc_forwarding_info_;

#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;
#endif

  // Implementation of AllocateRaw and MCAllocateRaw.
  inline Object* AllocateRawInternal(int size_in_bytes,
                                     AllocationInfo* alloc_info);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};
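
// Scavenge-flow sketch (roughly how these pieces compose; illustrative,
// not the actual collector code):
//
//   new_space->Flip();                 // the populated semispace becomes
//                                      // the from space
//   new_space->ResetAllocationInfo();  // allocation restarts at bottom()
//   // ... live objects are then copied out of the from space into the
//   // empty to space (or promoted), and dead ones are simply abandoned.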


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map
// like a heap object). They have a size and a next pointer. The next
// pointer is the raw address of the next free-list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a
  // map pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(int size_in_bytes);

  // Accessors for the next field.
  inline Address next();
  inline void set_next(Address next);

 private:
  static const int kNextOffset = Array::kHeaderSize;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};
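
// Freeing sketch (illustrative): turning a dead block into a list node is
// just stamping a size (which also writes the map word) and linking it:
//
//   FreeListNode* node = FreeListNode::FromAddress(start);
//   node->set_size(size_in_bytes);  // block now looks like a heap object
//   node->set_next(old_head);       // push onto the list for this size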


// The free list for the old space.
class OldSpaceFreeList BASE_EMBEDDED {
 public:
  explicit OldSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. A failure is returned if no block is available. The
  // number of bytes lost to fragmentation is returned in the output parameter
  // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
  Object* Allocate(int size_in_bytes, int* wasted_bytes);

 private:
  // The size range of blocks, in bytes. (Smaller allocations are allowed,
  // but will always result in waste.)
  static const int kMinBlockSize = Array::kHeaderSize + kPointerSize;
  static const int kMaxBlockSize = Page::kMaxHeapObjectSize;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  // Total available bytes in all blocks on this free list.
  int available_;

  // Blocks are put on exact free lists in an array, indexed by size in
  // words. The available sizes are kept in a list ordered by increasing
  // size. Entries corresponding to sizes < kMinBlockSize always have an
  // empty free list (but index kHead is used for the head of the size list).
  struct SizeNode {
    // Address of the head FreeListNode of the implied block size or NULL.
    Address head_node_;
    // Size (words) of the next larger available size if head_node_ != NULL.
    int next_size_;
  };
  static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
  SizeNode free_[kFreeListsLength];

  // Sentinel elements for the size list. Real elements lie strictly between
  // kHead and kEnd.
  static const int kHead = kMinBlockSize / kPointerSize - 1;
  static const int kEnd = kMaxInt;

  // We keep a "finger" in the size list to speed up a common pattern:
  // repeated requests for the same or increasing sizes.
  int finger_;

  // Starting from *prev, find and return the smallest size >= index (words),
  // or kEnd. Update *prev to be the largest size < index, or kHead.
  int FindSize(int index, int* prev) {
    int cur = free_[*prev].next_size_;
    while (cur < index) {
      *prev = cur;
      cur = free_[cur].next_size_;
    }
    return cur;
  }
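
  // A worked example with made-up sizes: if the only available block sizes
  // are 8 and 20 words, then free_[kHead].next_size_ == 8,
  // free_[8].next_size_ == 20, and free_[20].next_size_ == kEnd. Then
  //
  //   int prev = kHead;
  //   int cur = FindSize(10, &prev);
  //
  // skips past 8 (too small) and yields cur == 20 with prev == 8, so a
  // caller can take the 20-word block or splice a 10-word entry in between.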

  // Remove an existing element from the size list.
  void RemoveSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur == index);
    free_[prev].next_size_ = free_[cur].next_size_;
    finger_ = prev;
  }

  // Insert a new element into the size list.
  void InsertSize(int index) {
    int prev = kHead;
    int cur = FindSize(index, &prev);
    ASSERT(cur != index);
    free_[prev].next_size_ = index;
    free_[index].next_size_ = cur;
  }

  // The size list is not updated during a sequence of calls to Free, but is
  // rebuilt before the next allocation.
  void RebuildSizeList();
  bool needs_rebuild_;

#ifdef DEBUG
  // Does this free list contain a free block located at the address of
  // 'node'?
  bool Contains(FreeListNode* node);
#endif

  DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
};
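
// A minimal round-trip sketch (illustrative only; 'id' stands for the owning
// AllocationSpace, and 'start'/'size' for a word-aligned block the caller
// has carved out of an old-space page):
//
//   OldSpaceFreeList list(id);
//   int lost_to_waste = list.Free(start, size);  // Destroys the block.
//   int wasted_bytes;
//   Object* result = list.Allocate(size, &wasted_bytes);
//   if (!result->IsFailure()) {
//     // 'result' is an uninitialized block of 'size' bytes.
//   }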


// The free list for the map space.
class MapSpaceFreeList BASE_EMBEDDED {
 public:
  explicit MapSpaceFreeList(AllocationSpace owner);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  int available() { return available_; }

  // Place a node on the free list. The block starting at 'start' (assumed
  // to have size Map::kSize) is placed on the free list. Bookkeeping
  // information will be written to the block, i.e., its contents will be
  // destroyed. The start address should be word aligned.
  void Free(Address start);

  // Allocate a map-sized block from the free list. The block is
  // uninitialized. A failure is returned if no block is available.
  Object* Allocate();

 private:
  // Available bytes on the free list.
  int available_;

  // The head of the free list.
  Address head_;

  // The identity of the owning space, for building allocation Failure
  // objects.
  AllocationSpace owner_;

  DISALLOW_COPY_AND_ASSIGN(MapSpaceFreeList);
};
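
// Because every block on this list has the same size (Map::kSize), no size
// index or finger is needed: Free pushes onto a single linked list and
// Allocate pops from it. A minimal sketch (illustrative only; 'id' and
// 'block' are placeholders):
//
//   MapSpaceFreeList list(id);
//   list.Free(block);                  // The block's contents are destroyed.
//   Object* result = list.Allocate();  // Map-sized and uninitialized, or a
//                                      // Failure if the list is empty.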


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity. The
  // constructor does not allocate pages from the OS.
  explicit OldSpace(int max_capacity,
                    AllocationSpace id,
                    Executability executable)
      : PagedSpace(max_capacity, id, executable), free_list_(id) {
  }

  // The bytes available on the free list (i.e., not above the linear
  // allocation pointer).
  int AvailableFree() { return free_list_.available(); }

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top() : page->ObjectAreaEnd();
  }

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste.
  void Free(Address start, int size_in_bytes) {
    int wasted_bytes = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted_bytes);
  }

  // Prepare for full garbage collection. Resets the relocation pointer and
  // clears the free list.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Adjust the relocation end pointer so that it points to the end of the
  // object given by 'address' and 'size_in_bytes': move it to the next page
  // if necessary, ensure that it points to 'address', then increment it by
  // the size.
  void MCAdjustRelocationEnd(Address address, int size_in_bytes);

  // Updates the allocation pointer to the relocation top after a
  // mark-compact collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  OldSpaceFreeList free_list_;

  // During relocation, we keep a pointer to the most recently relocated
  // object in order to know when to move to the next page.
  Address mc_end_of_relocation_;

 public:
  TRACK_MEMORY("OldSpace")
};


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object with a given maximum capacity.
  explicit MapSpace(int max_capacity, AllocationSpace id)
      : PagedSpace(max_capacity, id, NOT_EXECUTABLE), free_list_(id) { }

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  virtual Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : page->ObjectAreaEnd() - kPageExtra;
  }

  // Give a map-sized block of memory to the space's free list.
  void Free(Address start) {
    free_list_.Free(start);
    accounting_stats_.DeallocateBytes(Map::kSize);
  }

  // Given an index, returns the page address.
  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // Updates the allocation pointer to the relocation top after a
  // mark-compact collection.
  virtual void MCCommitRelocationInfo();

#ifdef DEBUG
  // Verify integrity of this space.
  virtual void Verify();

  // Reports statistics for the space.
  void ReportStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Constants.
  static const int kMapPageIndexBits = 10;
  static const int kMaxMapPageIndex = (1 << kMapPageIndexBits) - 1;

  static const int kPageExtra = Page::kObjectAreaSize % Map::kSize;
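
  // kPageExtra is the tail of a page's object area that is too small to
  // hold one more map; PageAllocationTop above subtracts it for full pages.
  // A worked example with made-up values (the real ones live in Page and
  // Map): if Page::kObjectAreaSize were 7936 bytes and Map::kSize 120
  // bytes, a page would hold 66 maps and kPageExtra = 7936 % 120 = 16 bytes
  // of permanent slack.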

 protected:
  // Virtual function in the superclass. Slow path of AllocateRaw.
  HeapObject* SlowAllocateRaw(int size_in_bytes);

  // Virtual function in the superclass. Allocate linearly at the start of
  // the page after current_page (there is assumed to be one).
  HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);

 private:
  // The space's free list.
  MapSpaceFreeList free_list_;

  // An array of page start addresses in a map space.
  Address page_addresses_[kMaxMapPageIndex + 1];

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). A large
// object always starts at offset Page::kObjectStartOffset within a page.
// Large objects do not move during garbage collections.

// A LargeObjectChunk holds exactly one large object page with exactly one
// large object.
class LargeObjectChunk {
 public:
  // Allocates a new LargeObjectChunk containing a single large object page
  // (Page::kPageSize aligned) with at least size_in_bytes usable bytes after
  // the page's object area start, covering the large object itself and any
  // extra remembered set words. The actual size of the allocated chunk is
  // returned in the output parameter chunk_size.
  static LargeObjectChunk* New(int size_in_bytes,
                               size_t* chunk_size,
                               Executability executable);

  // Interpret a raw address as a large object chunk.
  static LargeObjectChunk* FromAddress(Address address) {
    return reinterpret_cast<LargeObjectChunk*>(address);
  }

  // Returns the address of this chunk.
  Address address() { return reinterpret_cast<Address>(this); }

  // Accessors for the fields of the chunk.
  LargeObjectChunk* next() { return next_; }
  void set_next(LargeObjectChunk* chunk) { next_ = chunk; }

  size_t size() { return size_; }
  void set_size(size_t size_in_bytes) { size_ = size_in_bytes; }

  // Returns the object in this chunk.
  inline HeapObject* GetObject();

  // Given a requested size (including any extra remembered set words),
  // returns the physical size of a chunk to be allocated.
  static int ChunkSizeFor(int size_in_bytes);

  // Given a chunk size, returns the object size it can accommodate (not
  // including any extra remembered set words). Used by
  // LargeObjectSpace::Available. Note that this can overestimate the size
  // of an object that will fit in a chunk: if the object requires extra
  // remembered set words (e.g., for large fixed arrays), the actual object
  // size for the chunk will be smaller than reported by this function.
  static int ObjectSizeFor(int chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
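
  // A worked example with made-up constants (the real values live in Page):
  // if Page::kPageSize were 8192 and Page::kObjectStartOffset 256, then
  // ObjectSizeFor(1048576) == 1048576 - 8192 - 256 == 1040128 bytes of
  // object payload, while any chunk of 8448 bytes or less holds no object
  // at all.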

 private:
  // A pointer to the next large object chunk in the space or NULL.
  LargeObjectChunk* next_;

  // The size of this chunk.
  size_t size_;

 public:
  TRACK_MEMORY("LargeObjectChunk")
};


class LargeObjectSpace : public Space {
 public:
  explicit LargeObjectSpace(AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool Setup();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  // Allocates a (non-FixedArray, non-Code) large object.
  Object* AllocateRaw(int size_in_bytes);
  // Allocates a large Code object.
  Object* AllocateRawCode(int size_in_bytes);
  // Allocates a large FixedArray.
  Object* AllocateRawFixedArray(int size_in_bytes);
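
  // A minimal allocation sketch (illustrative only; 'lo_space' stands for
  // some LargeObjectSpace the caller owns). All three allocators return
  // either a block or a Failure object, so callers must check before
  // casting:
  //
  //   Object* result = lo_space->AllocateRaw(size_in_bytes);
  //   if (result->IsFailure()) return result;  // E.g., retry after a GC.
  //   HeapObject* object = HeapObject::cast(result);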

  // Available bytes for objects in this space, not including any extra
  // remembered set words.
  int Available() {
    return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
  }

  virtual int Size() {
    return size_;
  }

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address, returning Failure::Exception() if
  // it is not found. The function iterates through all objects in this
  // space, so it may be slow.
  Object* FindObject(Address a);

  // Clears remembered sets.
  void ClearRSet();

  // Iterates objects whose remembered set bits are set.
  void IterateRSet(ObjectSlotCallback func);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_chunk_ == NULL; }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  void Protect();
  void Unprotect();
#endif

#ifdef DEBUG
  virtual void Verify();
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
  // Dump the remembered sets in the space to stdout.
  void PrintRSet();
#endif

  // Checks whether an address is in the object area in this space. It
  // iterates over all objects in the space, so it may be slow.
  bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }

 private:
  // The head of the linked list of large object chunks.
  LargeObjectChunk* first_chunk_;
  int size_;        // Allocated bytes.
  int page_count_;  // Number of chunks.

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  Object* AllocateRawInternal(int requested_size,
                              int object_size,
                              Executability executable);

  // Returns the number of extra bytes (rounded up to the nearest full word)
  // of remembered set required for extra_object_bytes of extra pointers
  // (given in bytes).
  static inline int ExtraRSetBytesFor(int extra_object_bytes);

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  bool has_next() { return current_ != NULL; }
  HeapObject* next();

  // Implementation of ObjectIterator.
  virtual bool has_next_object() { return has_next(); }
  virtual HeapObject* next_object() { return next(); }

 private:
  LargeObjectChunk* current_;
  HeapObjectCallback size_func_;
};
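
// A minimal iteration sketch (illustrative only; 'lo_space' stands for some
// LargeObjectSpace*):
//
//   LargeObjectIterator it(lo_space);
//   while (it.has_next()) {
//     HeapObject* object = it.next();
//     // ... process 'object', one per chunk ...
//   }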


} } // namespace v8::internal

#endif // V8_SPACES_H_