// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "allocation.h"
#include "list.h"
#include "log.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area. The page size is deliberately chosen to be 8K bytes.
// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
// have the allocation top address of this page. Heap objects are aligned to
// the pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than 8K.
//
// A card marking write barrier is used to keep track of intergenerational
// references. Old space pages are divided into regions of Page::kRegionSize
// size. Each region has a corresponding dirty bit in the page header which is
// set if the region might contain pointers to new space. For details about
// the dirty bit encoding see the comments in the
// Page::GetRegionNumberForAddress() method body.
//
// During scavenges and mark-sweep collections we iterate intergenerational
// pointers without decoding heap object maps, so if a page belongs to old
// pointer space or large object space it is essential to guarantee that
// the page does not contain any garbage pointers to new space: every
// pointer-aligned word which satisfies the Heap::InNewSpace() predicate must
// be a pointer to a live heap object in new space. Thus objects in old
// pointer and large object spaces should have a special layout (e.g. no bare
// integer fields). This requirement does not apply to the map space, which is
// iterated in a special fashion. However, we still require the pointer fields
// of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we use the notion of an
// allocation watermark. Every pointer below the watermark is considered to be
// well formed. The page allocation watermark is not necessarily equal to the
// page allocation top, but all live objects on the page must reside below the
// allocation watermark. During a scavenge the allocation watermark might be
// bumped and invalid pointers might appear below it. To avoid following them
// we store a valid watermark in a special field in the page header and set
// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
// Page::SetAllocationWatermark() method body.
//

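// For example (an illustrative sketch, not a quote of the barrier code):
// when a new-space pointer is stored into an old-space object, the write
// barrier conceptually performs
//   Page::FromAddress(field_address)->MarkRegionDirty(field_address);
// so the next scavenge only needs to scan the dirty regions of the page
// instead of the whole page.
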
// Some assertion macros used in the debugging mode.

#define ASSERT_PAGE_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define ASSERT_OBJECT_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define ASSERT_MAP_ALIGNED(address) \
  ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)

#define ASSERT_OBJECT_SIZE(size) \
  ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize))

#define ASSERT_PAGE_OFFSET(offset) \
  ASSERT((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define ASSERT_MAP_PAGE_INDEX(index) \
  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;

// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
// address is always aligned to the 8K page size.
//
// Each page starts with a header of size Page::kPageHeaderSize which contains
// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[
  //
  // Note that this function only works for addresses in normal paged
  // spaces and addresses in the first 8K of large object pages (i.e.,
  // the start of large objects but not necessarily derived pointers
  // within them).
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }
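
  // Example (illustrative, assuming the default 8K page size, i.e.
  // kPageSizeBits == 13): the mask clears the low 13 bits of the address, so
  //   Page* p = Page::FromAddress(reinterpret_cast<Address>(0x2a006f08));
  // returns the page starting at address 0x2a006000.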

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we first need to
  // subtract kPointerSize from it. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    ASSERT_PAGE_OFFSET(p->Offset(top));
    return p;
  }

  // Returns the start address of this page.
  Address address() { return reinterpret_cast<Address>(this); }

  // Checks whether this is a valid page address.
  bool is_valid() { return address() != NULL; }

  // Returns the next page of this page.
  inline Page* next_page();

  // Return the end of allocation in this page. Undefined for unused pages.
  inline Address AllocationTop();

  // Return the allocation watermark for the page.
  // For old space pages it is guaranteed that the area under the watermark
  // does not contain any garbage pointers to new space.
  inline Address AllocationWatermark();

  // Return the allocation watermark offset from the beginning of the page.
  inline uint32_t AllocationWatermarkOffset();

  inline void SetAllocationWatermark(Address allocation_watermark);

  inline void SetCachedAllocationWatermark(Address allocation_watermark);
  inline Address CachedAllocationWatermark();

  // Returns the start address of the object area in this page.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }

  // Returns the end address (exclusive) of the object area in this page.
  Address ObjectAreaEnd() { return address() + Page::kPageSize; }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // True if this page was in use before the current compaction started.
  // Result is valid only for pages owned by paged spaces and
  // only after PagedSpace::PrepareForMarkCompact was called.
  inline bool WasInUseBeforeMC();

  inline void SetWasInUseBeforeMC(bool was_in_use);

  // True if this page is a large object page.
  inline bool IsLargeObjectPage();

  inline void SetIsLargeObjectPage(bool is_large_object_page);

  inline bool IsPageExecutable();

  inline void SetIsPageExecutable(bool is_page_executable);

  // Returns the offset of a given address within this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    ASSERT_PAGE_OFFSET(offset);
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    ASSERT_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------
  // Card marking support

  static const uint32_t kAllRegionsCleanMarks = 0x0;
  static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;

  inline uint32_t GetRegionMarks();
  inline void SetRegionMarks(uint32_t dirty);

  inline uint32_t GetRegionMaskForAddress(Address addr);
  inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes);
  inline int GetRegionNumberForAddress(Address addr);

  inline void MarkRegionDirty(Address addr);
  inline bool IsRegionDirty(Address addr);

  inline void ClearRegionMarks(Address start,
                               Address end,
                               bool reaches_limit);

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
                                     kIntSize + kPointerSize + kPointerSize;

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both.
  static const int kObjectStartOffset =
      CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize));

  // Object area size in bytes.
  static const int kObjectAreaSize = kPageSize - kObjectStartOffset;

  // Maximum object size that fits in a page.
  static const int kMaxHeapObjectSize = kObjectAreaSize;

  static const int kDirtyFlagOffset = 2 * kPointerSize;
  static const int kRegionSizeLog2 = 8;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);

  STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);

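  // Example (illustrative sketch of the region mapping; the authoritative
  // encoding is in Page::GetRegionNumberForAddress()): with the default 8K
  // page and kRegionSize == 256, a page has kPageSize / kRegionSize == 32
  // regions, one per bit of dirty_regions_. Conceptually,
  //   int region = (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
  // yields a region number in the range [0..31] for any address on the page.
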
  enum PageFlag {
    IS_NORMAL_PAGE = 0,
    WAS_IN_USE_BEFORE_MC,

    // Page allocation watermark was bumped by preallocation during scavenge.
    // The correct watermark can be retrieved by the
    // CachedAllocationWatermark() method.
    WATERMARK_INVALIDATED,
    IS_EXECUTABLE,
    NUM_PAGE_FLAGS  // Must be last
  };
  static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1;

  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
  // scavenge we just invalidate the watermark on each old space page after
  // processing it. We then flip the meaning of the WATERMARK_INVALIDATED
  // flag at the beginning of the next scavenge, so that each page is again
  // marked as having a valid watermark.
  //
  // The following invariant must hold for pages in old pointer and map spaces:
  //     If a page is in use then it is marked as having an invalid watermark
  //     at the beginning and at the end of any GC.
  //
  // This invariant guarantees that after flipping the flag's meaning at the
  // beginning of a scavenge all pages in use will be marked as having a valid
  // watermark.
  static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap);

  // Returns true if the page allocation watermark was not altered during
  // scavenge.
  inline bool IsWatermarkValid();

  inline void InvalidateWatermark(bool value);

  inline bool GetPageFlag(PageFlag flag);
  inline void SetPageFlag(PageFlag flag, bool value);
  inline void ClearPageFlags();

  inline void ClearGCFields();

  static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1;
  static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
  static const uint32_t kAllocationWatermarkOffsetMask =
      ((1 << kAllocationWatermarkOffsetBits) - 1) <<
      kAllocationWatermarkOffsetShift;

  static const uint32_t kFlagsMask =
      ((1 << kAllocationWatermarkOffsetShift) - 1);

  STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
               kAllocationWatermarkOffsetBits);

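  // Example (illustrative sketch of how flags_ is packed; the accessors above
  // are the authoritative interface): the bits below
  // kAllocationWatermarkOffsetShift hold the PageFlag bits (kFlagsMask), and
  // the kAllocationWatermarkOffsetBits bits above them hold the allocation
  // watermark offset from the start of the page. Conceptually,
  //   uint32_t offset = (flags_ & kAllocationWatermarkOffsetMask) >>
  //                     kAllocationWatermarkOffsetShift;
  //   Address watermark = address() + offset;
  // recovers the watermark address from the packed offset.
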
  //---------------------------------------------------------------------------
  // Page header description.
  //
  // If a page is not in the large object space, the first word,
  // opaque_header, encodes the next page address (aligned to kPageSize 8K)
  // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use
  // opaque_header. The value range of the opaque_header is [0..kPageSize[,
  // or [next_page_start, next_page_end[. It cannot point to a valid address
  // in the current page. If a page is in the large object space, the first
  // word *may* (if the page start and large object chunk start are the
  // same) contain the address of the next large object chunk.
  intptr_t opaque_header;

  // If the page is not in the large object space, the low-order bit of the
  // second word is set. If the page is in the large object space, the
  // second word *may* (if the page start and large object chunk start are
  // the same) contain the large object chunk size. In either case, the
  // low-order bit for large object pages will be cleared.
  // For normal pages this word is used to store the page flags and the
  // offset of the allocation top.
  intptr_t flags_;

  // This field contains dirty marks for regions covering the page. Only dirty
  // regions might contain intergenerational references.
  // Only 32 dirty marks are supported, so for large object pages several
  // regions might be mapped to a single dirty mark.
  uint32_t dirty_regions_;

  // The index of the page in its owner space.
  int mc_page_index;

  // During mark-compact collections this field contains the forwarding
  // address of the first live object in this page.
  // During a scavenge this field is used to store the allocation watermark
  // if it was altered during the scavenge.
  Address mc_first_forwarded;

  Heap* heap_;
};


// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect the space by marking it read-only/writable.
  virtual void Protect() = 0;
  virtual void Unprotect() = 0;
#endif

#ifdef DEBUG
  virtual void Print() = 0;
#endif

  // After calling this we can allocate a certain number of bytes using only
  // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
  // without using freelists or causing a GC. This is used by partial
  // snapshots. It returns true if space was reserved or false if a GC is
  // needed. For paged spaces the space requested must include the space
  // wasted at the end of each page when allocating linearly.
  virtual bool ReserveSpace(int bytes) = 0;

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool Setup(const size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool exists() { return code_range_ != NULL; }
  bool contains(Address address) {
    if (code_range_ == NULL) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated);
  void FreeRawMemory(void* buf, size_t length);

 private:
  CodeRange();

  // The reserved range of virtual memory that all code objects are put in.
  VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {}
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {}

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, terminates V8 with FatalProcessOutOfMemory.
  void GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};
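
// Example (illustrative usage sketch; how the CodeRange instance is obtained,
// e.g. from its owning Isolate, is assumed here and not prescribed by this
// header):
//   if (code_range->exists() && code_range->contains(addr)) {
//     // addr belongs to a code object allocated from the reserved range.
//   }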


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator manages chunks for the paged heap spaces (old space and map
// space). A paged chunk consists of pages. Pages in a chunk have contiguous
// addresses and are linked as a list.
//
// The allocator keeps an initial chunk which is used for the new space. The
// leftover regions of the initial chunk are used for the initial chunks of
// old space and map space if they are big enough to hold at least one page.
// The allocator assumes that there is one old space and one map space, each
// of which expands by allocating kPagesPerChunk pages at a time, except for
// the last expansion (before running out of space). The first chunk may
// contain fewer than kPagesPerChunk pages as well.
//
// The memory allocator also allocates chunks for the large object space, but
// they are managed by the space itself. The new space does not expand.
//
// The fact that pages for paged spaces are allocated and deallocated in chunks
// induces a constraint on the order of pages in a linked list. We say that
// pages are linked in chunk-order if and only if every two consecutive
// pages from the same chunk are consecutive in the linked list.
//
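// For example (illustrative): if chunk A contains pages {a1, a2, a3} and
// chunk B contains pages {b1, b2}, then the lists a1 a2 a3 b1 b2 and
// b1 b2 a1 a2 a3 are chunk-ordered, while a1 b1 a2 b2 a3 is not.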


class MemoryAllocator {
 public:
  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool Setup(intptr_t max_capacity, intptr_t capacity_executable);

  // Deletes valid chunks.
  void TearDown();

  // Reserves an initial address range of virtual memory to be split between
  // the two new space semispaces, the old space, and the map space. The
  // memory is not yet committed or assigned to spaces and split into pages.
  // The initial chunk is unmapped when the memory allocator is torn down.
  // This function should only be called when there is not already a reserved
  // initial chunk (initial_chunk_ should be NULL). It returns the start
  // address of the initial chunk if successful, with the side effect of
  // setting the initial chunk, or else returns NULL if unsuccessful, leaving
  // the initial chunk NULL.
  void* ReserveInitialChunk(const size_t requested);

  // Commits pages from an as-yet-unmanaged block of virtual memory into a
  // paged space. The block should be part of the initial chunk reserved via
  // a call to ReserveInitialChunk. The number of pages is always returned in
  // the output parameter num_pages. This function assumes that the start
  // address is non-null and that it is big enough to hold at least one
  // page-aligned page. The call always succeeds, and num_pages is always
  // greater than zero.
  Page* CommitPages(Address start, size_t size, PagedSpace* owner,
                    int* num_pages);

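  // Example (illustrative, simplified wiring; the heap and space setup code
  // drive the real call sequence):
  //   memory_allocator->Setup(max_capacity, capacity_executable);
  //   void* chunk = memory_allocator->ReserveInitialChunk(requested);
  //   int num_pages;
  //   Page* first = memory_allocator->CommitPages(start, size, owner_space,
  //                                               &num_pages);
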
  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  // Attempts to allocate the requested (non-zero) number of pages from the
  // OS. Fewer pages might be allocated than requested. If it fails to
  // allocate memory from the OS or cannot allocate a single page, this
  // function returns an invalid page pointer (NULL). The caller must check
  // whether the returned page is valid (by calling Page::is_valid()). It is
  // guaranteed that allocated pages have contiguous addresses. The actual
  // number of allocated pages is returned in the output parameter
  // allocated_pages. If the PagedSpace owner is executable and there is
  // a code range, the pages are allocated from the code range.
  Page* AllocatePages(int requested_pages, int* allocated_pages,
                      PagedSpace* owner);

  // Frees pages from a given page and after. Requires pages to be
  // linked in chunk-order (see the class comment).
  // If 'p' is the first page of a chunk, pages from 'p' are freed
  // and this function returns an invalid page pointer.
  // Otherwise, the function searches for a page after 'p' that is
  // the first page of a chunk. Pages after the found page
  // are freed and the function returns 'p'.
  Page* FreePages(Page* p);

  // Frees all pages owned by a given space.
  void FreeAllPages(PagedSpace* space);

  // Allocates and frees raw memory of a certain size.
  // These are just thin wrappers around OS::Allocate and OS::Free,
  // but keep track of allocated bytes as part of the heap.
  // If the flag is EXECUTABLE and a code range exists, the requested
  // memory is allocated from the code range. If a code range exists
  // and the freed memory is in it, the code range manages the freed memory.
  MUST_USE_RESULT void* AllocateRawMemory(const size_t requested,
                                          size_t* allocated,
                                          Executability executable);
  void FreeRawMemory(void* buf,
                     size_t length,
                     Executability executable);
  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);
  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns the allocated space in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns the allocated executable space in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns the maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
  }

  // Links two pages.
  inline void SetNextPage(Page* prev, Page* next);

  // Returns the next page of a given page.
  inline Page* GetNextPage(Page* p);

  // Checks whether a page belongs to a space.
  inline bool IsPageInSpace(Page* p, PagedSpace* space);

  // Returns the space that owns the given page.
  inline PagedSpace* PageOwner(Page* page);

  // Finds the first/last page in the same chunk as a given page.
  Page* FindFirstPageInSameChunk(Page* p);
  Page* FindLastPageInSameChunk(Page* p);

  // Relinks the list of pages owned by a space to make it chunk-ordered.
  // Returns the new first and last pages of the space.
  // Also returns the last page in the relinked list which has the
  // WasInUseBeforeMC flag set.
  void RelinkPageListInChunkOrder(PagedSpace* space,
                                  Page** first_page,
                                  Page** last_page,
                                  Page** last_page_in_use);

#ifdef ENABLE_HEAP_PROTECTION
  // Protect/unprotect a block of memory by marking it read-only/writable.
  inline void Protect(Address start, size_t size);
  inline void Unprotect(Address start, size_t size,
                        Executability executable);

  // Protect/unprotect a chunk given a page in the chunk.
  inline void ProtectChunkFromPage(Page* page);
  inline void UnprotectChunkFromPage(Page* page);
#endif

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Due to an encoding limitation, we can only have 8K chunks.
  static const int kMaxNofChunks = 1 << kPageSizeBits;
  // If a chunk has at least 16 pages, the maximum heap size is about
  // 8K * 8K * 16 = 1G bytes.
#ifdef V8_TARGET_ARCH_X64
  static const int kPagesPerChunk = 32;
  // On 64 bit the chunk table consists of 4 levels of 4096-entry tables.
  static const int kPagesPerChunkLog2 = 5;
  static const int kChunkTableLevels = 4;
  static const int kChunkTableBitsPerLevel = 12;
#else
  static const int kPagesPerChunk = 16;
  // On 32 bit the chunk table consists of 2 levels of 256-entry tables.
  static const int kPagesPerChunkLog2 = 4;
  static const int kChunkTableLevels = 2;
  static const int kChunkTableBitsPerLevel = 8;
#endif

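  // Example (illustrative arithmetic): with the 32-bit defaults above,
  // kMaxNofChunks == 1 << 13 == 8192 chunks and kPagesPerChunk == 16, so the
  // maximum heap covered is 8192 * 16 * 8K == 2^30 bytes == 1 GB, matching
  // the "8K * 8K * 16 = 1G bytes" estimate in the comment above.
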
 private:
  MemoryAllocator();

  static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
  static const int kChunkSizeLog2 = kPagesPerChunkLog2 + kPageSizeBits;

  // Maximum space size in bytes.
  intptr_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  intptr_t capacity_executable_;

  // Allocated space size in bytes.
  intptr_t size_;

  // Allocated executable space size in bytes.
  intptr_t size_executable_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };
  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // The initial chunk of virtual memory.
  VirtualMemory* initial_chunk_;

  // Allocated chunk info: chunk start address, chunk size, and owning space.
  class ChunkInfo BASE_EMBEDDED {
   public:
    ChunkInfo() : address_(NULL),
                  size_(0),
                  owner_(NULL),
                  executable_(NOT_EXECUTABLE),
                  owner_identity_(FIRST_SPACE) {}
    inline void init(Address a, size_t s, PagedSpace* o);
    Address address() { return address_; }
    size_t size() { return size_; }
    PagedSpace* owner() { return owner_; }
    // We save executability of the owner to allow using it
    // when collecting stats after the owner has been destroyed.
    Executability executable() const { return executable_; }
    AllocationSpace owner_identity() const { return owner_identity_; }

   private:
    Address address_;
    size_t size_;
    PagedSpace* owner_;
    Executability executable_;
    AllocationSpace owner_identity_;
  };

  // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
  List<ChunkInfo> chunks_;
  List<int> free_chunk_ids_;
  int max_nof_chunks_;
  int top_;

  // Push/pop a free chunk id onto/from the stack.
  void Push(int free_chunk_id);
  int Pop();
  bool OutOfChunkIds() { return top_ == 0; }

  // Frees a chunk.
  void DeleteChunk(int chunk_id);

  // Basic check whether a chunk id is in the valid range.
  inline bool IsValidChunkId(int chunk_id);

  // Checks whether a chunk id identifies an allocated chunk.
  inline bool IsValidChunk(int chunk_id);

  // Returns the chunk id that a page belongs to.
  inline int GetChunkId(Page* p);

  // True if the address lies in the initial chunk.
  inline bool InInitialChunk(Address address);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and whose page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  Page* RelinkPagesInChunk(int chunk_id,
                           Address chunk_start,
                           size_t chunk_size,
                           Page* prev,
                           Page** last_page_in_use);

  friend class Isolate;

  Isolate* isolate_;

  DISALLOW_COPY_AND_ASSIGN(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid using virtual functions when
// iterating a specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from a given address to the
// top of a space. The given address must be below the current
// allocation pointer (space top). There are some caveats.
//
// (1) If the space top changes upward during iteration (because of
//     allocating new objects), the iterator does not iterate objects
//     above the original space top. The caller must create a new
//     iterator starting from the old top in order to visit these new
//     objects.
//
// (2) If new objects are allocated below the original allocation top
//     (e.g., free-list allocation in paged spaces), the new objects
//     may or may not be iterated depending on their position with
//     respect to the current point of iteration.
//
// (3) The space top should not change downward during iteration,
//     otherwise the iterator will return not-necessarily-valid
//     objects.

class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space. If a start
  // address is not given, the iterator starts from the space bottom.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(PagedSpace* space, Address start);
  HeapObjectIterator(PagedSpace* space,
                     Address start,
                     HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  inline HeapObject* next() {
    return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
  }

  // implementation of ObjectIterator.
  virtual HeapObject* next_object() { return next(); }

 private:
  Address cur_addr_;              // current iteration point
  Address end_addr_;              // end iteration point
  Address cur_limit_;             // current page limit
  HeapObjectCallback size_func_;  // size function
  Page* end_page_;                // caches the page of the end address

  HeapObject* FromCurrentPage() {
    ASSERT(cur_addr_ < cur_limit_);

    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
    ASSERT_OBJECT_SIZE(obj_size);

    cur_addr_ += obj_size;
    ASSERT(cur_addr_ <= cur_limit_);

    return obj;
  }

  // Slow path of next, goes into the next page.
  HeapObject* FromNextPage();

  // Initializes fields.
  void Initialize(Address start, Address end, HeapObjectCallback size_func);

#ifdef DEBUG
  // Verifies whether fields have valid values.
  void Verify();
#endif
};

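// Example (illustrative usage sketch; assumes next() returns NULL once the
// iteration is exhausted, and that the space pointer is obtained elsewhere):
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }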

// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.
//
// The PageIterator class provides three modes for iterating pages in a space:
//   PAGES_IN_USE iterates pages containing allocated objects.
//   PAGES_USED_BY_MC iterates pages that hold relocated objects during a
//     mark-compact collection.
//   ALL_PAGES iterates all pages in the space.
//
// There are some caveats.
//
// (1) If the space expands during iteration, new pages will not be
//     returned by the iterator in any mode.
//
// (2) If new objects are allocated during iteration, they will appear
//     in pages returned by the iterator. Allocation may cause the
//     allocation pointer or MC allocation pointer in the last page to
//     change between constructing the iterator and iterating the last
//     page.
//
// (3) The space should not shrink during iteration, otherwise the
//     iterator will return deallocated pages.

class PageIterator BASE_EMBEDDED {
 public:
  enum Mode {
    PAGES_IN_USE,
    PAGES_USED_BY_MC,
    ALL_PAGES
  };

  PageIterator(PagedSpace* space, Mode mode);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  Page* stop_page_;  // Page to stop at (last page returned by the iterator).
};

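// Example (illustrative usage sketch):
//   PageIterator it(space, PageIterator::PAGES_IN_USE);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // ... process p ...
//   }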

// -----------------------------------------------------------------------------
// A space has a list of pages. The next page can be accessed via the
// Page::next_page() call. The next page of the last page is an
// invalid page pointer. A space can expand and shrink dynamically.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  Address top;    // current allocation top
  Address limit;  // current allocation limit

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
        && (top <= limit);
  }
#endif
};

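// Example (illustrative sketch of the bump-pointer fast path built on an
// AllocationInfo; the real allocation paths live in the space classes):
//   Address new_top = allocation_info_.top + size_in_bytes;
//   if (new_top > allocation_info_.limit) {
//     // fall back to the slow path (free list, new page, or GC)
//   } else {
//     HeapObject* obj = HeapObject::FromAddress(allocation_info_.top);
//     allocation_info_.top = new_top;
//   }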

// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g., small blocks due
// to internal fragmentation, top of page areas in map space), and the bytes
// 'available' is the number of unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    available_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    available_ = capacity_;
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t Available() { return available_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    available_ += size_in_bytes;
  }

  // Shrink the space by removing available bytes.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    available_ -= size_in_bytes;
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    available_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    available_ += size_in_bytes;
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    available_ -= size_in_bytes;
    waste_ += size_in_bytes;
  }

  // Consider the wasted bytes to be allocated, as they contain filler
  // objects (waste -> size).
  void FillWastedBytes(intptr_t size_in_bytes) {
    waste_ -= size_in_bytes;
    size_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t available_;
  intptr_t size_;
  intptr_t waste_;
};

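// Example (illustrative): the invariant Capacity() == Size() + Waste() +
// Available() is preserved by every mutator above. For instance, starting
// from Clear():
//   stats.ExpandSpace(Page::kObjectAreaSize);  // capacity = available = N
//   stats.AllocateBytes(128);                  // size 128, available N - 128
//   stats.WasteBytes(32);                      // waste 32, available N - 160
// still satisfies N == 128 + 32 + (N - 160).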

class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool Setup(Address start, size_t size);

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetup();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }
  // Never crashes even if a is not a valid pointer.
  inline bool SafeContains(Address a);

  // Given an address occupied by a live object, return that object if it is
  // in this space, or Failure::Exception() if it is not. The implementation
  // iterates over objects in the page containing the address; the cost is
  // linear in the number of objects in the page. It may be slow.
  MUST_USE_RESULT MaybeObject* FindObject(Address addr);

  // Checks whether a page is currently in use by this space.
  bool IsUsed(Page* page);

  void MarkAllPagesClean();

  // Prepares for a mark-compact GC.
  virtual void PrepareForMarkCompact(bool will_compact);

  // The top of allocation in a page in this space. Undefined if the page is
  // unused.
  Address PageAllocationTop(Page* page) {
    return page == TopPageOf(allocation_info_) ? top()
        : PageAllocationLimit(page);
  }

  // The limit of allocation for a page in this space.
  virtual Address PageAllocationLimit(Page* page) = 0;

  void FlushTopPageWatermark() {
    AllocationTopPage()->SetCachedAllocationWatermark(top());
    AllocationTopPage()->InvalidateWatermark(true);
  }

Steve Blocka7e24c12009-10-30 11:49:00 +00001083 // Current capacity without growing (Size() + Available() + Waste()).
Ben Murdochf87a2032010-10-22 12:50:53 +01001084 intptr_t Capacity() { return accounting_stats_.Capacity(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001085
Steve Block3ce2e202009-11-05 08:53:23 +00001086 // Total amount of memory committed for this space. For paged
1087 // spaces this equals the capacity.
Ben Murdochf87a2032010-10-22 12:50:53 +01001088 intptr_t CommittedMemory() { return Capacity(); }
Steve Block3ce2e202009-11-05 08:53:23 +00001089
Steve Blocka7e24c12009-10-30 11:49:00 +00001090 // Available bytes without growing.
Ben Murdochf87a2032010-10-22 12:50:53 +01001091 intptr_t Available() { return accounting_stats_.Available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001092
1093 // Allocated bytes in this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001094 virtual intptr_t Size() { return accounting_stats_.Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001095
1096 // Wasted bytes due to fragmentation and not recoverable until the
1097 // next GC of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001098 intptr_t Waste() { return accounting_stats_.Waste(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001099
1100 // Returns the address of the first object in this space.
1101 Address bottom() { return first_page_->ObjectAreaStart(); }
1102
1103 // Returns the allocation pointer in this space.
1104 Address top() { return allocation_info_.top; }
1105
1106 // Allocate the requested number of bytes in the space if possible, return a
1107 // failure object if not.
John Reck59135872010-11-02 12:39:01 -07001108 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
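  // Callers must check for failure before using the result. A minimal sketch
  // ('space' and 'size_in_bytes' are placeholders; the real retry-with-GC
  // policy lives in Heap, not in this class):
  //   Object* result;
  //   MaybeObject* maybe = space->AllocateRaw(size_in_bytes);
  //   if (!maybe->ToObject(&result)) {
  //     // Allocation failed; the caller must collect garbage and retry.
  //   }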
Steve Blocka7e24c12009-10-30 11:49:00 +00001109
1110 // Allocate the requested number of bytes for relocation during mark-compact
1111 // collection.
John Reck59135872010-11-02 12:39:01 -07001112 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001113
Leon Clarkee46be812010-01-19 14:06:41 +00001114 virtual bool ReserveSpace(int bytes);
1115
1116 // Used by ReserveSpace.
1117 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001118
Steve Block6ded16b2010-05-10 14:33:55 +01001119 // Free all pages in the range from prev (exclusive) to last (inclusive).
1120 // Freed pages are moved to the end of the page list.
1121 void FreePages(Page* prev, Page* last);
1122
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001123 // Deallocates a block.
1124 virtual void DeallocateBlock(Address start,
1125 int size_in_bytes,
1126 bool add_to_freelist) = 0;
1127
Steve Block6ded16b2010-05-10 14:33:55 +01001128 // Set space allocation info.
1129 void SetTop(Address top) {
1130 allocation_info_.top = top;
1131 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top));
1132 }
1133
Steve Blocka7e24c12009-10-30 11:49:00 +00001134 // ---------------------------------------------------------------------------
1135 // Mark-compact collection support functions
1136
1137 // Set the relocation point to the beginning of the space.
1138 void MCResetRelocationInfo();
1139
1140 // Writes relocation info to the top page.
1141 void MCWriteRelocationInfoToPage() {
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001142 TopPageOf(mc_forwarding_info_)->
1143 SetAllocationWatermark(mc_forwarding_info_.top);
Steve Blocka7e24c12009-10-30 11:49:00 +00001144 }
1145
1146 // Computes the offset of a given address in this space from the beginning
1147 // of the space.
1148 int MCSpaceOffsetForAddress(Address addr);
1149
1150 // Updates the allocation pointer to the relocation top after a mark-compact
1151 // collection.
1152 virtual void MCCommitRelocationInfo() = 0;
1153
1154 // Releases half of unused pages.
1155 void Shrink();
1156
1157 // Ensures that the capacity is at least 'capacity'. Returns false on failure.
1158 bool EnsureCapacity(int capacity);
1159
1160#ifdef ENABLE_HEAP_PROTECTION
1161 // Protect/unprotect the space by marking it read-only/writable.
1162 void Protect();
1163 void Unprotect();
1164#endif
1165
1166#ifdef DEBUG
1167 // Print meta info and objects in this space.
1168 virtual void Print();
1169
1170 // Verify integrity of this space.
1171 virtual void Verify(ObjectVisitor* visitor);
1172
1173 // Overridden by subclasses to verify space-specific object
1174 // properties (e.g., only maps or free-list nodes are in map space).
1175 virtual void VerifyObject(HeapObject* obj) {}
1176
1177 // Report code object related statistics
1178 void CollectCodeStatistics();
1179 static void ReportCodeStatistics();
1180 static void ResetCodeStatistics();
1181#endif
1182
Steve Block6ded16b2010-05-10 14:33:55 +01001183 // Returns the page of the allocation pointer.
1184 Page* AllocationTopPage() { return TopPageOf(allocation_info_); }
1185
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001186 void RelinkPageListInChunkOrder(bool deallocate_blocks);
1187
Steve Blocka7e24c12009-10-30 11:49:00 +00001188 protected:
1189 // Maximum capacity of this space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001190 intptr_t max_capacity_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001191
1192 // Accounting information for this space.
1193 AllocationStats accounting_stats_;
1194
1195 // The first page in this space.
1196 Page* first_page_;
1197
1198 // The last page in this space. Initially set in Setup, updated in
1199 // Expand and Shrink.
1200 Page* last_page_;
1201
Steve Block6ded16b2010-05-10 14:33:55 +01001202 // True if pages owned by this space are linked in chunk-order.
1203 // See comment for class MemoryAllocator for definition of chunk-order.
1204 bool page_list_is_chunk_ordered_;
1205
Steve Blocka7e24c12009-10-30 11:49:00 +00001206 // Normal allocation information.
1207 AllocationInfo allocation_info_;
1208
1209 // Relocation information during mark-compact collections.
1210 AllocationInfo mc_forwarding_info_;
1211
1212 // Bytes of each page that cannot be allocated. Possibly non-zero
1213 // for pages in spaces with only fixed-size objects. Always zero
1214 // for pages in spaces with variable sized objects (those pages are
1215 // padded with free-list nodes).
1216 int page_extra_;
1217
1218 // Sets allocation pointer to a page bottom.
1219 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p);
1220
1221 // Returns the top page specified by an allocation info structure.
1222 static Page* TopPageOf(AllocationInfo alloc_info) {
1223 return Page::FromAllocationTop(alloc_info.limit);
1224 }
1225
Leon Clarked91b9f72010-01-27 17:25:45 +00001226 int CountPagesToTop() {
1227 Page* p = Page::FromAllocationTop(allocation_info_.top);
1228 PageIterator it(this, PageIterator::ALL_PAGES);
1229 int counter = 1;
1230 while (it.has_next()) {
1231 if (it.next() == p) return counter;
1232 counter++;
1233 }
1234 UNREACHABLE();
1235 return -1;
1236 }
1237
Steve Blocka7e24c12009-10-30 11:49:00 +00001238 // Expands the space by allocating a fixed number of pages. Returns false if
1239 // it cannot allocate the requested number of pages from the OS. Newly
1240 // allocated pages are appended after last_page_.
1241 bool Expand(Page* last_page);
1242
1243 // Generic fast case allocation function that tries linear allocation in
1244 // the top page of 'alloc_info'. Returns NULL on failure.
1245 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info,
1246 int size_in_bytes);
1247
1248 // During normal allocation or deserialization, roll to the next page in
1249 // the space (there is assumed to be one) and allocate there. This
1250 // function is space-dependent.
1251 virtual HeapObject* AllocateInNextPage(Page* current_page,
1252 int size_in_bytes) = 0;
1253
1254 // Slow path of AllocateRaw. This function is space-dependent.
John Reck59135872010-11-02 12:39:01 -07001255 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0;
Steve Blocka7e24c12009-10-30 11:49:00 +00001256
1257 // Slow path of MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001258 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001259
1260#ifdef DEBUG
Leon Clarkee46be812010-01-19 14:06:41 +00001261 // Returns the number of total pages in this space.
1262 int CountTotalPages();
Steve Blocka7e24c12009-10-30 11:49:00 +00001263#endif
1264 private:
Steve Blocka7e24c12009-10-30 11:49:00 +00001265
1266 // Returns a pointer to the page of the relocation pointer.
1267 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
1268
Steve Blocka7e24c12009-10-30 11:49:00 +00001269 friend class PageIterator;
1270};
1271
1272
1273#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1274class NumberAndSizeInfo BASE_EMBEDDED {
1275 public:
1276 NumberAndSizeInfo() : number_(0), bytes_(0) {}
1277
1278 int number() const { return number_; }
1279 void increment_number(int num) { number_ += num; }
1280
1281 int bytes() const { return bytes_; }
1282 void increment_bytes(int size) { bytes_ += size; }
1283
1284 void clear() {
1285 number_ = 0;
1286 bytes_ = 0;
1287 }
1288
1289 private:
1290 int number_;
1291 int bytes_;
1292};
1293
1294
1295// HistogramInfo class for recording a single "bar" of a histogram. This
1296// class is used for collecting statistics to print to stdout (when compiled
1297// with DEBUG) or to the log file (when compiled with
1298// ENABLE_LOGGING_AND_PROFILING).
1299class HistogramInfo: public NumberAndSizeInfo {
1300 public:
1301 HistogramInfo() : NumberAndSizeInfo() {}
1302
1303 const char* name() { return name_; }
1304 void set_name(const char* name) { name_ = name; }
1305
1306 private:
1307 const char* name_;
1308};
1309#endif
1310
1311
1312// -----------------------------------------------------------------------------
1313// SemiSpace in young generation
1314//
1315// A semispace is a contiguous chunk of memory. The mark-compact collector
1316// uses the memory in the from space as a marking stack when tracing live
1317// objects.
1318
1319class SemiSpace : public Space {
1320 public:
1321 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01001322 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001323 start_ = NULL;
1324 age_mark_ = NULL;
1325 }
1326
1327 // Sets up the semispace using the given chunk.
1328 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1329
1330 // Tear down the space. Heap memory was not allocated by the space, so it
1331 // is not deallocated here.
1332 void TearDown();
1333
1334 // True if the space has been set up but not torn down.
1335 bool HasBeenSetup() { return start_ != NULL; }
1336
1337 // Grow the size of the semispace by committing extra virtual memory.
1338 // Assumes that the caller has checked that the semispace has not reached
1339 // its maximum capacity (and thus there is space available in the reserved
1340 // address range to grow).
1341 bool Grow();
1342
1343 // Grow the semispace to the new capacity. The new capacity
1344 // requested must be larger than the current capacity.
1345 bool GrowTo(int new_capacity);
1346
1347 // Shrinks the semispace to the new capacity. The new capacity
1348 // requested must be more than the amount of used memory in the
1349 // semispace and less than the current capacity.
1350 bool ShrinkTo(int new_capacity);
1351
1352 // Returns the start address of the space.
1353 Address low() { return start_; }
1354 // Returns one past the end address of the space.
1355 Address high() { return low() + capacity_; }
1356
1357 // Age mark accessors.
1358 Address age_mark() { return age_mark_; }
1359 void set_age_mark(Address mark) { age_mark_ = mark; }
1360
1361 // True if the address is in the address range of this semispace (not
1362 // necessarily below the allocation pointer).
1363 bool Contains(Address a) {
1364 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1365 == reinterpret_cast<uintptr_t>(start_);
1366 }
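  // Worked example with hypothetical numbers, assuming address_mask_ is the
  // complement of (maximum capacity - 1) and the reservation is aligned to
  // that capacity: with start_ == 0xF0000000 and a 2 MB semispace,
  // address_mask_ == 0xFFE00000, so for a == 0xF012ABCD the AND above yields
  // 0xF0000000 == start_, i.e. the address is inside this semispace.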
1367
1368 // True if the object is a heap object in the address range of this
1369 // semispace (not necessarily below the allocation pointer).
1370 bool Contains(Object* o) {
1371 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1372 }
1373
1374 // The offset of an address from the beginning of the space.
Steve Blockd0582a62009-12-15 09:54:21 +00001375 int SpaceOffsetForAddress(Address addr) {
1376 return static_cast<int>(addr - low());
1377 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001378
Leon Clarkee46be812010-01-19 14:06:41 +00001379 // If we did not have these here, SemiSpace would be abstract. However,
1380 // they should never be called.
Ben Murdochf87a2032010-10-22 12:50:53 +01001381 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001382 UNREACHABLE();
1383 return 0;
1384 }
1385
Leon Clarkee46be812010-01-19 14:06:41 +00001386 virtual bool ReserveSpace(int bytes) {
1387 UNREACHABLE();
1388 return false;
1389 }
1390
Steve Blocka7e24c12009-10-30 11:49:00 +00001391 bool is_committed() { return committed_; }
1392 bool Commit();
1393 bool Uncommit();
1394
Steve Block6ded16b2010-05-10 14:33:55 +01001395#ifdef ENABLE_HEAP_PROTECTION
1396 // Protect/unprotect the space by marking it read-only/writable.
1397 virtual void Protect() {}
1398 virtual void Unprotect() {}
1399#endif
1400
Steve Blocka7e24c12009-10-30 11:49:00 +00001401#ifdef DEBUG
1402 virtual void Print();
1403 virtual void Verify();
1404#endif
1405
1406 // Returns the current capacity of the semi space.
1407 int Capacity() { return capacity_; }
1408
1409 // Returns the maximum capacity of the semi space.
1410 int MaximumCapacity() { return maximum_capacity_; }
1411
1412 // Returns the initial capacity of the semi space.
1413 int InitialCapacity() { return initial_capacity_; }
1414
1415 private:
1416 // The current and maximum capacity of the space.
1417 int capacity_;
1418 int maximum_capacity_;
1419 int initial_capacity_;
1420
1421 // The start address of the space.
1422 Address start_;
1423 // Used to govern object promotion during mark-compact collection.
1424 Address age_mark_;
1425
1426 // Masks and comparison values to test for containment in this semispace.
1427 uintptr_t address_mask_;
1428 uintptr_t object_mask_;
1429 uintptr_t object_expected_;
1430
1431 bool committed_;
1432
1433 public:
1434 TRACK_MEMORY("SemiSpace")
1435};
1436
1437
1438// A SemiSpaceIterator is an ObjectIterator that iterates over the active
1439// semispace of the heap's new space. It iterates over the objects in the
1440// semispace from a given start address (defaulting to the bottom of the
1441// semispace) to the top of the semispace. New objects allocated after the
1442// iterator is created are not iterated.
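// A typical traversal looks like this (sketch; assumes 'heap' is the owning
// Heap* and that no allocation happens while iterating):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) {
//     // ... visit obj ...
//   }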
1443class SemiSpaceIterator : public ObjectIterator {
1444 public:
1445 // Create an iterator over the objects in the given space. If no start
1446 // address is given, the iterator starts from the bottom of the space. If
1447 // no size function is given, the iterator calls Object::Size().
1448 explicit SemiSpaceIterator(NewSpace* space);
1449 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
1450 SemiSpaceIterator(NewSpace* space, Address start);
1451
Steve Blocka7e24c12009-10-30 11:49:00 +00001452 HeapObject* next() {
Leon Clarked91b9f72010-01-27 17:25:45 +00001453 if (current_ == limit_) return NULL;
Steve Blocka7e24c12009-10-30 11:49:00 +00001454
1455 HeapObject* object = HeapObject::FromAddress(current_);
1456 int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
1457
1458 current_ += size;
1459 return object;
1460 }
1461
1462 // Implementation of the ObjectIterator functions.
Steve Blocka7e24c12009-10-30 11:49:00 +00001463 virtual HeapObject* next_object() { return next(); }
1464
1465 private:
1466 void Initialize(NewSpace* space, Address start, Address end,
1467 HeapObjectCallback size_func);
1468
1469 // The semispace.
1470 SemiSpace* space_;
1471 // The current iteration point.
1472 Address current_;
1473 // The end of iteration.
1474 Address limit_;
1475 // The callback function.
1476 HeapObjectCallback size_func_;
1477};
1478
1479
1480// -----------------------------------------------------------------------------
1481// The young generation space.
1482//
1483// The new space consists of a contiguous pair of semispaces. It simply
1484// forwards most functions to the appropriate semispace.
1485
1486class NewSpace : public Space {
1487 public:
1488 // Constructor.
Steve Block44f0eee2011-05-26 01:26:41 +01001489 explicit NewSpace(Heap* heap)
1490 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
1491 to_space_(heap),
1492 from_space_(heap) {}
Steve Blocka7e24c12009-10-30 11:49:00 +00001493
1494 // Sets up the new space using the given chunk.
1495 bool Setup(Address start, int size);
1496
1497 // Tears down the space. Heap memory was not allocated by the space, so it
1498 // is not deallocated here.
1499 void TearDown();
1500
1501 // True if the space has been set up but not torn down.
1502 bool HasBeenSetup() {
1503 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup();
1504 }
1505
1506 // Flip the pair of spaces.
1507 void Flip();
1508
1509 // Grow the capacity of the semispaces. Assumes that they are not at
1510 // their maximum capacity.
1511 void Grow();
1512
1513 // Shrink the capacity of the semispaces.
1514 void Shrink();
1515
1516 // True if the address or object lies in the address range of either
1517 // semispace (not necessarily below the allocation pointer).
1518 bool Contains(Address a) {
1519 return (reinterpret_cast<uintptr_t>(a) & address_mask_)
1520 == reinterpret_cast<uintptr_t>(start_);
1521 }
1522 bool Contains(Object* o) {
1523 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
1524 }
1525
1526 // Return the allocated bytes in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001527 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); }
1528 // The same, but returning an int. We have to keep the version that returns
1529 // intptr_t because it is inherited, but when we know we are dealing with the
1530 // new space, which cannot get as big as the other spaces, this is useful:
1531 int SizeAsInt() { return static_cast<int>(Size()); }
Steve Block3ce2e202009-11-05 08:53:23 +00001532
Steve Blocka7e24c12009-10-30 11:49:00 +00001533 // Return the current capacity of a semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001534 intptr_t Capacity() {
Steve Blocka7e24c12009-10-30 11:49:00 +00001535 ASSERT(to_space_.Capacity() == from_space_.Capacity());
1536 return to_space_.Capacity();
1537 }
Steve Block3ce2e202009-11-05 08:53:23 +00001538
1539 // Return the total amount of memory committed for new space.
Ben Murdochf87a2032010-10-22 12:50:53 +01001540 intptr_t CommittedMemory() {
Steve Block3ce2e202009-11-05 08:53:23 +00001541 if (from_space_.is_committed()) return 2 * Capacity();
1542 return Capacity();
1543 }
1544
Steve Blocka7e24c12009-10-30 11:49:00 +00001545 // Return the available bytes without growing in the active semispace.
Ben Murdochf87a2032010-10-22 12:50:53 +01001546 intptr_t Available() { return Capacity() - Size(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001547
1548 // Return the maximum capacity of a semispace.
1549 int MaximumCapacity() {
1550 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
1551 return to_space_.MaximumCapacity();
1552 }
1553
1554 // Returns the initial capacity of a semispace.
1555 int InitialCapacity() {
1556 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
1557 return to_space_.InitialCapacity();
1558 }
1559
1560 // Return the address of the allocation pointer in the active semispace.
1561 Address top() { return allocation_info_.top; }
1562 // Return the address of the first object in the active semispace.
1563 Address bottom() { return to_space_.low(); }
1564
1565 // Get the age mark of the inactive semispace.
1566 Address age_mark() { return from_space_.age_mark(); }
1567 // Set the age mark in the active semispace.
1568 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
1569
1570 // The start address of the space and a bit mask. Anding an address in the
1571 // new space with the mask will result in the start address.
1572 Address start() { return start_; }
1573 uintptr_t mask() { return address_mask_; }
1574
1575 // The allocation top and limit addresses.
1576 Address* allocation_top_address() { return &allocation_info_.top; }
1577 Address* allocation_limit_address() { return &allocation_info_.limit; }
1578
John Reck59135872010-11-02 12:39:01 -07001579 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001580 return AllocateRawInternal(size_in_bytes, &allocation_info_);
1581 }
1582
1583 // Allocate the requested number of bytes for relocation during mark-compact
1584 // collection.
John Reck59135872010-11-02 12:39:01 -07001585 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001586 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_);
1587 }
1588
1589 // Reset the allocation pointer to the beginning of the active semispace.
1590 void ResetAllocationInfo();
1591 // Reset the relocation pointer to the bottom of the inactive semispace in
1592 // preparation for mark-compact collection.
1593 void MCResetRelocationInfo();
1594 // Update the allocation pointer in the active semispace after a
1595 // mark-compact collection.
1596 void MCCommitRelocationInfo();
1597
1598 // Get the extent of the inactive semispace (for use as a marking stack).
1599 Address FromSpaceLow() { return from_space_.low(); }
1600 Address FromSpaceHigh() { return from_space_.high(); }
1601
1602 // Get the extent of the active semispace (to sweep newly copied objects
1603 // during a scavenge collection).
1604 Address ToSpaceLow() { return to_space_.low(); }
1605 Address ToSpaceHigh() { return to_space_.high(); }
1606
1607 // Offsets from the beginning of the semispaces.
1608 int ToSpaceOffsetForAddress(Address a) {
1609 return to_space_.SpaceOffsetForAddress(a);
1610 }
1611 int FromSpaceOffsetForAddress(Address a) {
1612 return from_space_.SpaceOffsetForAddress(a);
1613 }
1614
1615 // True if the object is a heap object in the address range of the
1616 // respective semispace (not necessarily below the allocation pointer of the
1617 // semispace).
1618 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
1619 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
1620
1621 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1622 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
1623
Leon Clarkee46be812010-01-19 14:06:41 +00001624 virtual bool ReserveSpace(int bytes);
1625
Ben Murdochb0fe1622011-05-05 13:52:32 +01001626 // Resizes a sequential string which must be the most recent thing that was
1627 // allocated in new space.
1628 template <typename StringType>
1629 inline void ShrinkStringAtAllocationBoundary(String* string, int len);
1630
Steve Blocka7e24c12009-10-30 11:49:00 +00001631#ifdef ENABLE_HEAP_PROTECTION
1632 // Protect/unprotect the space by marking it read-only/writable.
1633 virtual void Protect();
1634 virtual void Unprotect();
1635#endif
1636
1637#ifdef DEBUG
1638 // Verify the active semispace.
1639 virtual void Verify();
1640 // Print the active semispace.
1641 virtual void Print() { to_space_.Print(); }
1642#endif
1643
1644#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1645 // Iterates the active semispace to collect statistics.
1646 void CollectStatistics();
1647 // Reports previously collected statistics of the active semispace.
1648 void ReportStatistics();
1649 // Clears previously collected statistics.
1650 void ClearHistograms();
1651
1652 // Record the allocation or promotion of a heap object. Note that we don't
1653 // record every single allocation, but only those that happen in the
1654 // to space during a scavenge GC.
1655 void RecordAllocation(HeapObject* obj);
1656 void RecordPromotion(HeapObject* obj);
1657#endif
1658
1659 // Return whether the operation succeeded.
1660 bool CommitFromSpaceIfNeeded() {
1661 if (from_space_.is_committed()) return true;
1662 return from_space_.Commit();
1663 }
1664
1665 bool UncommitFromSpace() {
1666 if (!from_space_.is_committed()) return true;
1667 return from_space_.Uncommit();
1668 }
1669
1670 private:
1671 // The semispaces.
1672 SemiSpace to_space_;
1673 SemiSpace from_space_;
1674
1675 // Start address and bit mask for containment testing.
1676 Address start_;
1677 uintptr_t address_mask_;
1678 uintptr_t object_mask_;
1679 uintptr_t object_expected_;
1680
1681 // Allocation pointer and limit for normal allocation and allocation during
1682 // mark-compact collection.
1683 AllocationInfo allocation_info_;
1684 AllocationInfo mc_forwarding_info_;
1685
1686#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1687 HistogramInfo* allocated_histogram_;
1688 HistogramInfo* promoted_histogram_;
1689#endif
1690
1691 // Implementation of AllocateRaw and MCAllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001692 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(
1693 int size_in_bytes,
1694 AllocationInfo* alloc_info);
Steve Blocka7e24c12009-10-30 11:49:00 +00001695
1696 friend class SemiSpaceIterator;
1697
1698 public:
1699 TRACK_MEMORY("NewSpace")
1700};
1701
1702
1703// -----------------------------------------------------------------------------
1704// Free lists for old object spaces
1705//
1706// Free-list nodes are free blocks in the heap. They look like heap objects
1707// (free-list node pointers have the heap object tag, and they have a map like
1708// a heap object). They have a size and a next pointer. The next pointer is
1709// the raw address of the next free list node (or NULL).
1710class FreeListNode: public HeapObject {
1711 public:
1712 // Obtain a free-list node from a raw address. This is not a cast because
1713 // it does not check nor require that the first word at the address is a map
1714 // pointer.
1715 static FreeListNode* FromAddress(Address address) {
1716 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1717 }
1718
Steve Block3ce2e202009-11-05 08:53:23 +00001719 static inline bool IsFreeListNode(HeapObject* object);
1720
Steve Blocka7e24c12009-10-30 11:49:00 +00001721 // Set the size in bytes, which can be read with HeapObject::Size(). This
1722 // function also writes a map to the first word of the block so that it
1723 // looks like a heap object to the garbage collector and heap iteration
1724 // functions.
Steve Block44f0eee2011-05-26 01:26:41 +01001725 void set_size(Heap* heap, int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001726
1727 // Accessors for the next field.
Steve Block44f0eee2011-05-26 01:26:41 +01001728 inline Address next(Heap* heap);
1729 inline void set_next(Heap* heap, Address next);
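  // Putting the accessors together (sketch; 'heap', 'addr' and 'size' are
  // placeholders): turning a freed, word-aligned block of 'size' bytes at
  // 'addr' into a node the GC can safely walk over:
  //   FreeListNode* node = FreeListNode::FromAddress(addr);
  //   node->set_size(heap, size);
  //   node->set_next(heap, NULL);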
Steve Blocka7e24c12009-10-30 11:49:00 +00001730
1731 private:
1732 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize);
1733
1734 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1735};
1736
1737
1738// The free list for the old space.
1739class OldSpaceFreeList BASE_EMBEDDED {
1740 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001741 OldSpaceFreeList(Heap* heap, AllocationSpace owner);
Steve Blocka7e24c12009-10-30 11:49:00 +00001742
1743 // Clear the free list.
1744 void Reset();
1745
1746 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001747 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001748
1749 // Place a node on the free list. The block of size 'size_in_bytes'
1750 // starting at 'start' is placed on the free list. The return value is the
1751 // number of bytes that have been lost due to internal fragmentation by
1752 // freeing the block. Bookkeeping information will be written to the block,
1753 // i.e., its contents will be destroyed. The start address should be word
1754 // aligned, and the size should be a non-zero multiple of the word size.
1755 int Free(Address start, int size_in_bytes);
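  // For example (a sketch): Free(start, 40) puts a word-aligned 40-byte block
  // on the exact list for 40 / kPointerSize words and normally returns 0;
  // only blocks smaller than kMinBlockSize are counted entirely as waste.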
1756
1757 // Allocate a block of size 'size_in_bytes' from the free list. The block
1758 // is uninitialized. A failure is returned if no block is available. The
1759 // number of bytes lost to fragmentation is returned in the output parameter
1760 // 'wasted_bytes'. The size should be a non-zero multiple of the word size.
John Reck59135872010-11-02 12:39:01 -07001761 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001762
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001763 void MarkNodes();
1764
Steve Blocka7e24c12009-10-30 11:49:00 +00001765 private:
1766 // The size range of blocks, in bytes. (Smaller allocations are allowed, but
1767 // will always result in waste.)
1768 static const int kMinBlockSize = 2 * kPointerSize;
1769 static const int kMaxBlockSize = Page::kMaxHeapObjectSize;
1770
Steve Block44f0eee2011-05-26 01:26:41 +01001771 Heap* heap_;
1772
Steve Blocka7e24c12009-10-30 11:49:00 +00001773 // The identity of the owning space, for building allocation Failure
1774 // objects.
1775 AllocationSpace owner_;
1776
1777 // Total available bytes in all blocks on this free list.
1778 int available_;
1779
1780 // Blocks are put on exact free lists in an array, indexed by size in words.
1781 // The available sizes are kept in a list ordered by increasing size. Entries
1782 // corresponding to sizes < kMinBlockSize always have an empty free list
1783 // (but index kHead is used for the head of the size list).
1784 struct SizeNode {
1785 // Address of the head FreeListNode of the implied block size or NULL.
1786 Address head_node_;
1787 // Size (words) of the next larger available size if head_node_ != NULL.
1788 int next_size_;
1789 };
1790 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1;
1791 SizeNode free_[kFreeListsLength];
1792
1793 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[.
1794 static const int kHead = kMinBlockSize / kPointerSize - 1;
1795 static const int kEnd = kMaxInt;
1796
1797 // We keep a "finger" in the size list to speed up a common pattern:
1798 // repeated requests for the same or increasing sizes.
1799 int finger_;
1800
1801 // Starting from *prev, find and return the smallest size >= index (words),
1802 // or kEnd. Update *prev to be the largest size < index, or kHead.
1803 int FindSize(int index, int* prev) {
1804 int cur = free_[*prev].next_size_;
1805 while (cur < index) {
1806 *prev = cur;
1807 cur = free_[cur].next_size_;
1808 }
1809 return cur;
1810 }
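  // Worked example (hypothetical contents): after freeing one 12-word and one
  // 30-word block, the size list is kHead -> 12 -> 30 -> kEnd. A call to
  // FindSize(20, &prev) with *prev == kHead walks past 12, sets *prev to 12,
  // and returns 30: the smallest available size that fits a 20-word request.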
1811
1812 // Remove an existing element from the size list.
1813 void RemoveSize(int index) {
1814 int prev = kHead;
1815 int cur = FindSize(index, &prev);
1816 ASSERT(cur == index);
1817 free_[prev].next_size_ = free_[cur].next_size_;
1818 finger_ = prev;
1819 }
1820
1821 // Insert a new element into the size list.
1822 void InsertSize(int index) {
1823 int prev = kHead;
1824 int cur = FindSize(index, &prev);
1825 ASSERT(cur != index);
1826 free_[prev].next_size_ = index;
1827 free_[index].next_size_ = cur;
1828 }
1829
1830 // The size list is not updated during a sequence of calls to Free, but is
1831 // rebuilt before the next allocation.
1832 void RebuildSizeList();
1833 bool needs_rebuild_;
1834
1835#ifdef DEBUG
1836 // Does this free list contain a free block located at the address of 'node'?
1837 bool Contains(FreeListNode* node);
1838#endif
1839
1840 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList);
1841};
1842
1843
1844// The free list for the map space.
1845class FixedSizeFreeList BASE_EMBEDDED {
1846 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001847 FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size);
Steve Blocka7e24c12009-10-30 11:49:00 +00001848
1849 // Clear the free list.
1850 void Reset();
1851
1852 // Return the number of bytes available on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001853 intptr_t available() { return available_; }
Steve Blocka7e24c12009-10-30 11:49:00 +00001854
1855 // Place a node on the free list. The block starting at 'start' (assumed to
1856 // have size object_size_) is placed on the free list. Bookkeeping
1857 // information will be written to the block, i.e., its contents will be
1858 // destroyed. The start address should be word aligned.
1859 void Free(Address start);
1860
1861 // Allocate a fixed-size block from the free list. The block is uninitialized.
1862 // A failure is returned if no block is available.
John Reck59135872010-11-02 12:39:01 -07001863 MUST_USE_RESULT MaybeObject* Allocate();
Steve Blocka7e24c12009-10-30 11:49:00 +00001864
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001865 void MarkNodes();
1866
Steve Blocka7e24c12009-10-30 11:49:00 +00001867 private:
Steve Block44f0eee2011-05-26 01:26:41 +01001868
1869 Heap* heap_;
1870
Steve Blocka7e24c12009-10-30 11:49:00 +00001871 // Available bytes on the free list.
Ben Murdochf87a2032010-10-22 12:50:53 +01001872 intptr_t available_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001873
1874 // The head of the free list.
1875 Address head_;
1876
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01001877 // The tail of the free list.
1878 Address tail_;
1879
Steve Blocka7e24c12009-10-30 11:49:00 +00001880 // The identity of the owning space, for building allocation Failure
1881 // objects.
1882 AllocationSpace owner_;
1883
1884 // The size of the objects in this space.
1885 int object_size_;
1886
1887 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList);
1888};
1889
1890
1891// -----------------------------------------------------------------------------
1892// Old object space (excluding map objects)
1893
1894class OldSpace : public PagedSpace {
1895 public:
1896 // Creates an old space object with a given maximum capacity.
1897 // The constructor does not allocate pages from OS.
Steve Block44f0eee2011-05-26 01:26:41 +01001898 OldSpace(Heap* heap,
1899 intptr_t max_capacity,
1900 AllocationSpace id,
1901 Executability executable)
1902 : PagedSpace(heap, max_capacity, id, executable),
1903 free_list_(heap, id) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001904 page_extra_ = 0;
1905 }
1906
1907 // The bytes available on the free list (i.e., not above the linear allocation
1908 // pointer).
Ben Murdochf87a2032010-10-22 12:50:53 +01001909 intptr_t AvailableFree() { return free_list_.available(); }
Steve Blocka7e24c12009-10-30 11:49:00 +00001910
Steve Block6ded16b2010-05-10 14:33:55 +01001911 // The limit of allocation for a page in this space.
1912 virtual Address PageAllocationLimit(Page* page) {
1913 return page->ObjectAreaEnd();
Steve Blocka7e24c12009-10-30 11:49:00 +00001914 }
1915
1916 // Give a block of memory to the space's free list. It might be added to
1917 // the free list or accounted as waste.
Steve Block6ded16b2010-05-10 14:33:55 +01001918 // If add_to_freelist is false then only the accounting stats are updated and
1919 // no attempt is made to add the area to the free list.
1920 void Free(Address start, int size_in_bytes, bool add_to_freelist) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001921 accounting_stats_.DeallocateBytes(size_in_bytes);
Steve Block6ded16b2010-05-10 14:33:55 +01001922
1923 if (add_to_freelist) {
1924 int wasted_bytes = free_list_.Free(start, size_in_bytes);
1925 accounting_stats_.WasteBytes(wasted_bytes);
1926 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001927 }
1928
Kristian Monsen80d68ea2010-09-08 11:05:35 +01001929 virtual void DeallocateBlock(Address start,
1930 int size_in_bytes,
1931 bool add_to_freelist);
1932
Steve Blocka7e24c12009-10-30 11:49:00 +00001933 // Prepare for full garbage collection. Resets the relocation pointer and
1934 // clears the free list.
1935 virtual void PrepareForMarkCompact(bool will_compact);
1936
1937 // Updates the allocation pointer to the relocation top after a mark-compact
1938 // collection.
1939 virtual void MCCommitRelocationInfo();
1940
Leon Clarkee46be812010-01-19 14:06:41 +00001941 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
1942
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08001943 void MarkFreeListNodes() { free_list_.MarkNodes(); }
1944
Steve Blocka7e24c12009-10-30 11:49:00 +00001945#ifdef DEBUG
1946 // Reports statistics for the space
1947 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00001948#endif
1949
1950 protected:
1951 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07001952 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00001953
1954 // Virtual function in the superclass. Allocate linearly at the start of
1955 // the page after current_page (there is assumed to be one).
1956 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
1957
1958 private:
1959 // The space's free list.
1960 OldSpaceFreeList free_list_;
1961
1962 public:
1963 TRACK_MEMORY("OldSpace")
1964};
1965
1966
1967// -----------------------------------------------------------------------------
1968// Old space for objects of a fixed size
1969
1970class FixedSpace : public PagedSpace {
1971 public:
Steve Block44f0eee2011-05-26 01:26:41 +01001972 FixedSpace(Heap* heap,
1973 intptr_t max_capacity,
Steve Blocka7e24c12009-10-30 11:49:00 +00001974 AllocationSpace id,
1975 int object_size_in_bytes,
1976 const char* name)
Steve Block44f0eee2011-05-26 01:26:41 +01001977 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
Steve Blocka7e24c12009-10-30 11:49:00 +00001978 object_size_in_bytes_(object_size_in_bytes),
1979 name_(name),
Steve Block44f0eee2011-05-26 01:26:41 +01001980 free_list_(heap, id, object_size_in_bytes) {
Steve Blocka7e24c12009-10-30 11:49:00 +00001981 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes;
1982 }
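  // Illustration with made-up numbers: if Page::kObjectAreaSize were 8000
  // bytes and object_size_in_bytes were 88, page_extra_ would be
  // 8000 % 88 == 80, i.e. 80 bytes at the end of every page that can never
  // hold an object of this space and are therefore excluded from the page's
  // allocation limit (see PageAllocationLimit below).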
1983
Steve Block6ded16b2010-05-10 14:33:55 +01001984 // The limit of allocation for a page in this space.
1985 virtual Address PageAllocationLimit(Page* page) {
1986 return page->ObjectAreaEnd() - page_extra_;
Steve Blocka7e24c12009-10-30 11:49:00 +00001987 }
1988
1989 int object_size_in_bytes() { return object_size_in_bytes_; }
1990
1991 // Give a fixed-size block of memory to the space's free list.
Steve Block6ded16b2010-05-10 14:33:55 +01001992 // If add_to_freelist is false then only the accounting stats are updated and
1993 // no attempt is made to add the area to the free list.
1994 void Free(Address start, bool add_to_freelist) {
1995 if (add_to_freelist) {
1996 free_list_.Free(start);
1997 }
Steve Blocka7e24c12009-10-30 11:49:00 +00001998 accounting_stats_.DeallocateBytes(object_size_in_bytes_);
1999 }
2000
2001 // Prepares for a mark-compact GC.
2002 virtual void PrepareForMarkCompact(bool will_compact);
2003
2004 // Updates the allocation pointer to the relocation top after a mark-compact
2005 // collection.
2006 virtual void MCCommitRelocationInfo();
2007
Leon Clarkee46be812010-01-19 14:06:41 +00002008 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
2009
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002010 virtual void DeallocateBlock(Address start,
2011 int size_in_bytes,
2012 bool add_to_freelist);
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002013
2014 void MarkFreeListNodes() { free_list_.MarkNodes(); }
2015
Steve Blocka7e24c12009-10-30 11:49:00 +00002016#ifdef DEBUG
2017 // Reports statistic info of the space
2018 void ReportStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002019#endif
2020
2021 protected:
2022 // Virtual function in the superclass. Slow path of AllocateRaw.
John Reck59135872010-11-02 12:39:01 -07002023 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002024
2025 // Virtual function in the superclass. Allocate linearly at the start of
2026 // the page after current_page (there is assumed to be one).
2027 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
2028
Leon Clarkee46be812010-01-19 14:06:41 +00002029 void ResetFreeList() {
2030 free_list_.Reset();
2031 }
2032
Steve Blocka7e24c12009-10-30 11:49:00 +00002033 private:
2034 // The size of objects in this space.
2035 int object_size_in_bytes_;
2036
2037 // The name of this space.
2038 const char* name_;
2039
2040 // The space's free list.
2041 FixedSizeFreeList free_list_;
2042};
2043
2044
2045// -----------------------------------------------------------------------------
2046// Old space for all map objects
2047
2048class MapSpace : public FixedSpace {
2049 public:
2050 // Creates a map space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002051 MapSpace(Heap* heap,
2052 intptr_t max_capacity,
2053 int max_map_space_pages,
2054 AllocationSpace id)
2055 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
Leon Clarked91b9f72010-01-27 17:25:45 +00002056 max_map_space_pages_(max_map_space_pages) {
2057 ASSERT(max_map_space_pages < kMaxMapPageIndex);
2058 }
Steve Blocka7e24c12009-10-30 11:49:00 +00002059
2060 // Prepares for a mark-compact GC.
2061 virtual void PrepareForMarkCompact(bool will_compact);
2062
2063 // Given an index, returns the page address.
2064 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
2065
Leon Clarked91b9f72010-01-27 17:25:45 +00002066 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits;
Steve Blocka7e24c12009-10-30 11:49:00 +00002067
Leon Clarkee46be812010-01-19 14:06:41 +00002068 // Are map pointers encodable into map word?
2069 bool MapPointersEncodable() {
2070 if (!FLAG_use_big_map_space) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002071 ASSERT(CountPagesToTop() <= kMaxMapPageIndex);
Leon Clarkee46be812010-01-19 14:06:41 +00002072 return true;
2073 }
Leon Clarked91b9f72010-01-27 17:25:45 +00002074 return CountPagesToTop() <= max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002075 }
2076
2077 // Should be called after forced sweep to find out if map space needs
2078 // compaction.
2079 bool NeedsCompaction(int live_maps) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002080 return !MapPointersEncodable() && live_maps <= CompactionThreshold();
Leon Clarkee46be812010-01-19 14:06:41 +00002081 }
2082
2083 Address TopAfterCompaction(int live_maps) {
2084 ASSERT(NeedsCompaction(live_maps));
2085
2086 int pages_left = live_maps / kMapsPerPage;
2087 PageIterator it(this, PageIterator::ALL_PAGES);
2088 while (pages_left-- > 0) {
2089 ASSERT(it.has_next());
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002090 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002091 }
2092 ASSERT(it.has_next());
2093 Page* top_page = it.next();
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002094 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
Leon Clarkee46be812010-01-19 14:06:41 +00002095 ASSERT(top_page->is_valid());
2096
2097 int offset = live_maps % kMapsPerPage * Map::kSize;
2098 Address top = top_page->ObjectAreaStart() + offset;
2099 ASSERT(top < top_page->ObjectAreaEnd());
2100 ASSERT(Contains(top));
2101
2102 return top;
2103 }
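  // For instance (hypothetical numbers): with kMapsPerPage == 64 and
  // live_maps == 130, two full pages are skipped (130 / 64 == 2) and the new
  // top ends up at offset (130 % 64) * Map::kSize == 2 * Map::kSize into the
  // third page.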
2104
2105 void FinishCompaction(Address new_top, int live_maps) {
2106 Page* top_page = Page::FromAddress(new_top);
2107 ASSERT(top_page->is_valid());
2108
2109 SetAllocationInfo(&allocation_info_, top_page);
2110 allocation_info_.top = new_top;
2111
2112 int new_size = live_maps * Map::kSize;
2113 accounting_stats_.DeallocateBytes(accounting_stats_.Size());
2114 accounting_stats_.AllocateBytes(new_size);
2115
Ben Murdoche0cee9b2011-05-25 10:26:03 +01002116 // Flush allocation watermarks.
2117 for (Page* p = first_page_; p != top_page; p = p->next_page()) {
2118 p->SetAllocationWatermark(p->AllocationTop());
2119 }
2120 top_page->SetAllocationWatermark(new_top);
2121
Leon Clarkee46be812010-01-19 14:06:41 +00002122#ifdef DEBUG
2123 if (FLAG_enable_slow_asserts) {
Leon Clarked91b9f72010-01-27 17:25:45 +00002124 intptr_t actual_size = 0;
Leon Clarkee46be812010-01-19 14:06:41 +00002125 for (Page* p = first_page_; p != top_page; p = p->next_page())
2126 actual_size += kMapsPerPage * Map::kSize;
2127 actual_size += (new_top - top_page->ObjectAreaStart());
2128 ASSERT(accounting_stats_.Size() == actual_size);
2129 }
2130#endif
2131
2132 Shrink();
2133 ResetFreeList();
2134 }
2135
Steve Blocka7e24c12009-10-30 11:49:00 +00002136 protected:
2137#ifdef DEBUG
2138 virtual void VerifyObject(HeapObject* obj);
2139#endif
2140
2141 private:
Leon Clarkee46be812010-01-19 14:06:41 +00002142 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
2143
2144 // Do map space compaction if there is a page gap.
Leon Clarked91b9f72010-01-27 17:25:45 +00002145 int CompactionThreshold() {
2146 return kMapsPerPage * (max_map_space_pages_ - 1);
2147 }
2148
2149 const int max_map_space_pages_;
Leon Clarkee46be812010-01-19 14:06:41 +00002150
Steve Blocka7e24c12009-10-30 11:49:00 +00002151 // An array of page start address in a map space.
Leon Clarked91b9f72010-01-27 17:25:45 +00002152 Address page_addresses_[kMaxMapPageIndex];
Steve Blocka7e24c12009-10-30 11:49:00 +00002153
2154 public:
2155 TRACK_MEMORY("MapSpace")
2156};
2157
2158
2159// -----------------------------------------------------------------------------
2160// Old space for all global object property cell objects
2161
2162class CellSpace : public FixedSpace {
2163 public:
2164 // Creates a property cell space object with a maximum capacity.
Steve Block44f0eee2011-05-26 01:26:41 +01002165 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2166 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
2167 {}
Steve Blocka7e24c12009-10-30 11:49:00 +00002168
2169 protected:
2170#ifdef DEBUG
2171 virtual void VerifyObject(HeapObject* obj);
2172#endif
2173
2174 public:
2175 TRACK_MEMORY("CellSpace")
2176};
2177
2178
2179// -----------------------------------------------------------------------------
2180// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2181// the large object space. A large object is allocated from OS heap with
2182// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2183// A large object always starts at offset Page::kObjectStartOffset within a page.
2184// Large objects do not move during garbage collections.
2185
2186// A LargeObjectChunk holds exactly one large object page with exactly one
2187// large object.
2188class LargeObjectChunk {
2189 public:
2190 // Allocates a new LargeObjectChunk that contains a large object page
2191 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002192 // object) bytes after the object area start of that page.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002193 static LargeObjectChunk* New(int size_in_bytes, Executability executable);
2194
2195 // Free the memory associated with the chunk.
2196 inline void Free(Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002197
2198 // Interpret a raw address as a large object chunk.
2199 static LargeObjectChunk* FromAddress(Address address) {
2200 return reinterpret_cast<LargeObjectChunk*>(address);
2201 }
2202
2203 // Returns the address of this chunk.
2204 Address address() { return reinterpret_cast<Address>(this); }
2205
2206 // Accessors for the fields of the chunk.
2207 LargeObjectChunk* next() { return next_; }
2208 void set_next(LargeObjectChunk* chunk) { next_ = chunk; }
Steve Block791712a2010-08-27 10:21:07 +01002209 size_t size() { return size_ & ~Page::kPageFlagMask; }
Ben Murdochb0fe1622011-05-05 13:52:32 +01002210
2211 // Compute the start address in the chunk.
2212 inline Address GetStartAddress();
Steve Blocka7e24c12009-10-30 11:49:00 +00002213
2214 // Returns the object in this chunk.
Ben Murdochb0fe1622011-05-05 13:52:32 +01002215 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); }
Steve Blocka7e24c12009-10-30 11:49:00 +00002216
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002217 // Given a requested size, returns the physical size of a chunk to be
2218 // allocated.
Steve Blocka7e24c12009-10-30 11:49:00 +00002219 static int ChunkSizeFor(int size_in_bytes);
2220
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002221 // Given a chunk size, returns the object size it can accommodate. Used by
2222 // LargeObjectSpace::Available.
Ben Murdochf87a2032010-10-22 12:50:53 +01002223 static intptr_t ObjectSizeFor(intptr_t chunk_size) {
Steve Blocka7e24c12009-10-30 11:49:00 +00002224 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2225 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2226 }
2227
2228 private:
2229 // A pointer to the next large object chunk in the space or NULL.
2230 LargeObjectChunk* next_;
2231
Ben Murdochb0fe1622011-05-05 13:52:32 +01002232 // The total size of this chunk.
Steve Blocka7e24c12009-10-30 11:49:00 +00002233 size_t size_;
2234
2235 public:
2236 TRACK_MEMORY("LargeObjectChunk")
2237};
2238
2239
2240class LargeObjectSpace : public Space {
2241 public:
Steve Block44f0eee2011-05-26 01:26:41 +01002242 LargeObjectSpace(Heap* heap, AllocationSpace id);
Steve Blocka7e24c12009-10-30 11:49:00 +00002243 virtual ~LargeObjectSpace() {}
2244
2245 // Initializes internal data structures.
2246 bool Setup();
2247
2248 // Releases internal resources, frees objects in this space.
2249 void TearDown();
2250
2251 // Allocates a (non-FixedArray, non-Code) large object.
John Reck59135872010-11-02 12:39:01 -07002252 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002253 // Allocates a large Code object.
John Reck59135872010-11-02 12:39:01 -07002254 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002255 // Allocates a large FixedArray.
John Reck59135872010-11-02 12:39:01 -07002256 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes);
Steve Blocka7e24c12009-10-30 11:49:00 +00002257
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002258 // Available bytes for objects in this space.
Steve Block44f0eee2011-05-26 01:26:41 +01002259 inline intptr_t Available();
Steve Blocka7e24c12009-10-30 11:49:00 +00002260
Ben Murdochf87a2032010-10-22 12:50:53 +01002261 virtual intptr_t Size() {
Steve Blocka7e24c12009-10-30 11:49:00 +00002262 return size_;
2263 }
2264
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002265 virtual intptr_t SizeOfObjects() {
2266 return objects_size_;
2267 }
2268
Steve Blocka7e24c12009-10-30 11:49:00 +00002269 int PageCount() {
2270 return page_count_;
2271 }
2272
2273 // Finds an object for a given address, returns Failure::Exception()
2274 // if it is not found. The function iterates through all objects in this
2275 // space, so it may be slow.
John Reck59135872010-11-02 12:39:01 -07002276 MaybeObject* FindObject(Address a);
Steve Blocka7e24c12009-10-30 11:49:00 +00002277
Kristian Monsen80d68ea2010-09-08 11:05:35 +01002278 // Finds a large object page containing the given pc, returns NULL
2279 // if such a page doesn't exist.
2280 LargeObjectChunk* FindChunkContainingPc(Address pc);
2281
Ben Murdoch7f4d5bd2010-06-15 11:15:29 +01002282 // Iterates objects covered by dirty regions.
2283 void IterateDirtyRegions(ObjectSlotCallback func);
Steve Blocka7e24c12009-10-30 11:49:00 +00002284
2285 // Frees unmarked objects.
2286 void FreeUnmarkedObjects();
2287
2288 // Checks whether a heap object is in this space; O(1).
2289 bool Contains(HeapObject* obj);
2290
2291 // Checks whether the space is empty.
2292 bool IsEmpty() { return first_chunk_ == NULL; }
2293
Leon Clarkee46be812010-01-19 14:06:41 +00002294 // See the comments for ReserveSpace in the Space class. This has to be
2295 // called after ReserveSpace has been called on the paged spaces, since they
2296 // may use some memory, leaving less for large objects.
2297 virtual bool ReserveSpace(int bytes);
2298
Steve Blocka7e24c12009-10-30 11:49:00 +00002299#ifdef ENABLE_HEAP_PROTECTION
2300 // Protect/unprotect the space by marking it read-only/writable.
2301 void Protect();
2302 void Unprotect();
2303#endif
2304
2305#ifdef DEBUG
2306 virtual void Verify();
2307 virtual void Print();
2308 void ReportStatistics();
2309 void CollectCodeStatistics();
Steve Blocka7e24c12009-10-30 11:49:00 +00002310#endif
2311 // Checks whether an address is in the object area in this space. It
2312 // iterates all objects in the space. May be slow.
2313 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }
2314
2315 private:
2316 // The head of the linked list of large object chunks.
2317 LargeObjectChunk* first_chunk_;
Ben Murdochf87a2032010-10-22 12:50:53 +01002318 intptr_t size_; // allocated bytes
Steve Blocka7e24c12009-10-30 11:49:00 +00002319 int page_count_; // number of chunks
Shimeng (Simon) Wang8a31eba2010-12-06 19:01:33 -08002320 intptr_t objects_size_; // size of objects
Steve Blocka7e24c12009-10-30 11:49:00 +00002321
2322 // Shared implementation of AllocateRaw, AllocateRawCode and
2323 // AllocateRawFixedArray.
John Reck59135872010-11-02 12:39:01 -07002324 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size,
2325 int object_size,
2326 Executability executable);
Steve Blocka7e24c12009-10-30 11:49:00 +00002327
Steve Blocka7e24c12009-10-30 11:49:00 +00002328 friend class LargeObjectIterator;
2329
2330 public:
2331 TRACK_MEMORY("LargeObjectSpace")
2332};
2333
2334
2335class LargeObjectIterator: public ObjectIterator {
2336 public:
2337 explicit LargeObjectIterator(LargeObjectSpace* space);
2338 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2339
Steve Blocka7e24c12009-10-30 11:49:00 +00002340 HeapObject* next();
2341
2342 // implementation of ObjectIterator.
Steve Blocka7e24c12009-10-30 11:49:00 +00002343 virtual HeapObject* next_object() { return next(); }
2344
2345 private:
2346 LargeObjectChunk* current_;
2347 HeapObjectCallback size_func_;
2348};
2349
2350
Steve Block44f0eee2011-05-26 01:26:41 +01002351#ifdef DEBUG
2352struct CommentStatistic {
2353 const char* comment;
2354 int size;
2355 int count;
2356 void Clear() {
2357 comment = NULL;
2358 size = 0;
2359 count = 0;
2360 }
2361 // Must be small, since an iteration is used for lookup.
2362 static const int kMaxComments = 64;
2363};
2364#endif
2365
2366
Steve Blocka7e24c12009-10-30 11:49:00 +00002367} } // namespace v8::internal
2368
2369#endif // V8_SPACES_H_